author     Alberto Lerner <alerner@10gen.com>  2012-12-20 20:29:10 -0500
committer  Alberto Lerner <alerner@10gen.com>  2012-12-21 17:53:05 -0500
commit     e67651f117934fa920fc3ab1b2203baf9765afa1 (patch)
tree       883b9ec7c157970c7eeefbd6d19c03e022cf755a /src
parent     c98c4f07be6838fb98280756526d5eabf56f8aea (diff)
download   mongo-e67651f117934fa920fc3ab1b2203baf9765afa1.tar.gz
Moved ShardChunkVersion to its own module and renamed.
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/client/dbclient.cpp | 2
-rw-r--r--  src/mongo/client/dbclientcursor.cpp | 2
-rw-r--r--  src/mongo/client/parallel.cpp | 24
-rw-r--r--  src/mongo/client/parallel.h | 2
-rw-r--r--  src/mongo/db/client.cpp | 8
-rw-r--r--  src/mongo/db/dbcommands.cpp | 1
-rw-r--r--  src/mongo/db/instance.cpp | 1
-rw-r--r--  src/mongo/db/ops/query.cpp | 29
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp | 1
-rw-r--r--  src/mongo/dbtests/config_upgrade_tests.cpp | 6
-rw-r--r--  src/mongo/dbtests/d_chunk_manager_tests.cpp | 27
-rw-r--r--  src/mongo/dbtests/sharding.cpp | 111
-rw-r--r--  src/mongo/s/SConscript | 5
-rw-r--r--  src/mongo/s/chunk.cpp | 55
-rw-r--r--  src/mongo/s/chunk.h | 32
-rw-r--r--  src/mongo/s/chunk_diff.h | 21
-rw-r--r--  src/mongo/s/chunk_diff.hpp | 17
-rw-r--r--  src/mongo/s/chunk_version.h | 319
-rw-r--r--  src/mongo/s/chunk_version_test.cpp | 88
-rw-r--r--  src/mongo/s/collection_manager.cpp | 6
-rw-r--r--  src/mongo/s/collection_manager.h | 18
-rw-r--r--  src/mongo/s/collection_manager_test.cpp | 6
-rw-r--r--  src/mongo/s/config.cpp | 9
-rw-r--r--  src/mongo/s/d_chunk_manager.cpp | 20
-rw-r--r--  src/mongo/s/d_chunk_manager.h | 20
-rw-r--r--  src/mongo/s/d_logic.h | 17
-rw-r--r--  src/mongo/s/d_migrate.cpp | 15
-rw-r--r--  src/mongo/s/d_split.cpp | 15
-rw-r--r--  src/mongo/s/d_state.cpp | 39
-rw-r--r--  src/mongo/s/default_version.cpp | 3
-rw-r--r--  src/mongo/s/metadata_loader.cpp | 19
-rw-r--r--  src/mongo/s/shard_version.cpp | 17
-rw-r--r--  src/mongo/s/strategy.cpp | 6
-rw-r--r--  src/mongo/s/strategy_shard.cpp | 9
-rw-r--r--  src/mongo/s/type_chunk.cpp | 14
-rw-r--r--  src/mongo/s/type_chunk.h | 21
-rw-r--r--  src/mongo/s/type_chunk_test.cpp | 8
-rw-r--r--  src/mongo/s/util.h | 319
-rw-r--r--  src/mongo/s/writeback_listener.cpp | 27
39 files changed, 711 insertions(+), 648 deletions(-)
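The practical upshot of the rename, as a minimal sketch (illustrative only: the function and collection name below are made up; the headers, types, and calls are taken from the diff that follows):

    // Sketch of post-rename usage: ShardChunkVersion is now ChunkVersion.
    // mongo/s/chunk_version.h provides the type; mongo/s/util.h is kept
    // only for the Send/RecvStaleConfigException classes.
    #include "mongo/s/chunk_version.h"
    #include "mongo/s/util.h" // for RecvStaleConfigException

    using namespace mongo;

    void exampleUsage() {
        ChunkVersion v( 1, 0, OID::gen() );   // major 1, minor 0, fresh epoch
        BSONObj obj = v.toBSON();             // { version : <Timestamp>, versionEpoch : <OID> }

        ChunkVersion parsed = ChunkVersion::fromBSON( obj, "version" );
        if ( ! parsed.isEquivalentTo( v ) ) {
            // Deprecated code paths throw with zero versions, as parallel.cpp does below.
            throw RecvStaleConfigException( "test.coll", "exampleUsage",
                                            ChunkVersion( 0, OID() ), v, true );
        }
    }
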
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index 041f712fc47..3e3c40be2f8 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -26,7 +26,7 @@
#include "mongo/db/json.h"
#include "mongo/db/namespace-inl.h"
#include "mongo/db/namespacestring.h"
-#include "mongo/s/util.h"
+#include "mongo/s/util.h" // for RecvStaleConfigException
#include "mongo/util/md5.hpp"
#ifdef MONGO_SSL
diff --git a/src/mongo/client/dbclientcursor.cpp b/src/mongo/client/dbclientcursor.cpp
index d0469839ae5..13f92b8ed94 100644
--- a/src/mongo/client/dbclientcursor.cpp
+++ b/src/mongo/client/dbclientcursor.cpp
@@ -24,7 +24,7 @@
#include "mongo/db/dbmessage.h"
#include "mongo/db/namespacestring.h"
#include "mongo/s/shard.h"
-#include "mongo/s/util.h"
+#include "mongo/s/util.h" // for RecvStaleConfigException
namespace mongo {
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 1b4501ff5a6..62ad404b260 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -17,15 +17,17 @@
#include "pch.h"
-#include "parallel.h"
-#include "connpool.h"
-#include "../db/dbmessage.h"
-#include "../s/util.h"
-#include "../s/shard.h"
-#include "../s/chunk.h"
-#include "../s/config.h"
-#include "../s/grid.h"
+
+#include "mongo/client/connpool.h"
#include "mongo/client/dbclientcursor.h"
+#include "mongo/client/parallel.h"
+#include "mongo/db/dbmessage.h"
+#include "mongo/s/chunk.h"
+#include "mongo/s/chunk_version.h"
+#include "mongo/s/config.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/shard.h"
+#include "mongo/s/util.h" // for RecvStaleConfigException
namespace mongo {
@@ -125,7 +127,7 @@ namespace mongo {
if ( conn.setVersion() ) {
conn.done();
// Deprecated, so we don't care about versions here
- throw RecvStaleConfigException( _ns , "ClusteredCursor::query" , ShardChunkVersion( 0, OID() ), ShardChunkVersion( 0, OID() ), true );
+ throw RecvStaleConfigException( _ns , "ClusteredCursor::query" , ChunkVersion( 0, OID() ), ChunkVersion( 0, OID() ), true );
}
LOG(5) << "ClusteredCursor::query (" << type() << ") server:" << server
@@ -1394,7 +1396,7 @@ namespace mongo {
if ( conns[i]->setVersion() ) {
conns[i]->done();
// Version is zero b/c this is deprecated codepath
- staleConfigExs.push_back( (string)"stale config detected for " + RecvStaleConfigException( _ns , "ParallelCursor::_init" , ShardChunkVersion( 0, OID() ), ShardChunkVersion( 0, OID() ), true ).what() + errLoc );
+ staleConfigExs.push_back( (string)"stale config detected for " + RecvStaleConfigException( _ns , "ParallelCursor::_init" , ChunkVersion( 0, OID() ), ChunkVersion( 0, OID() ), true ).what() + errLoc );
break;
}
@@ -1548,7 +1550,7 @@ namespace mongo {
if( throwException && staleConfigExs.size() > 0 ){
// Version is zero b/c this is deprecated codepath
- throw RecvStaleConfigException( _ns , errMsg.str() , ShardChunkVersion( 0, OID() ), ShardChunkVersion( 0, OID() ), ! allConfigStale );
+ throw RecvStaleConfigException( _ns , errMsg.str() , ChunkVersion( 0, OID() ), ChunkVersion( 0, OID() ), ! allConfigStale );
}
else if( throwException )
throw DBException( errMsg.str(), 14827 );
diff --git a/src/mongo/client/parallel.h b/src/mongo/client/parallel.h
index cb7fdad6eaf..0ba6fee400b 100644
--- a/src/mongo/client/parallel.h
+++ b/src/mongo/client/parallel.h
@@ -25,7 +25,7 @@
#include "mongo/db/matcher.h"
#include "mongo/db/namespacestring.h"
#include "mongo/s/shard.h"
-#include "mongo/s/util.h"
+#include "mongo/s/util.h" // for StaleConfigException
#include "mongo/util/concurrency/mvar.h"
namespace mongo {
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index d4a68203b1e..d71ff163c84 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -43,9 +43,11 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/pagefault.h"
#include "mongo/db/repl/rs.h"
+#include "mongo/db/security.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/d_logic.h"
+#include "mongo/s/util.h" // for SendStaleConfigException
#include "mongo/scripting/engine.h"
-#include "mongo/db/security.h"
#include "mongo/util/file_allocator.h"
#include "mongo/util/mongoutils/checksum.h"
#include "mongo/util/mongoutils/html.h"
@@ -271,8 +273,8 @@ namespace mongo {
break;
default: {
string errmsg;
- ShardChunkVersion received;
- ShardChunkVersion wanted;
+ ChunkVersion received;
+ ChunkVersion wanted;
if ( ! shardVersionOk( _ns , errmsg, received, wanted ) ) {
ostringstream os;
os << "[" << _ns << "] shard version not ok in Client::Context: " << errmsg;
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 5a6dffc123f..0b188931b71 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/replutil.h"
#include "mongo/db/security.h"
#include "mongo/s/d_writeback.h"
+#include "mongo/s/util.h" // for SendStaleConfigException
#include "mongo/scripting/engine.h"
#include "mongo/server.h"
#include "mongo/util/lruishmap.h"
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index c7057d3ecb5..8bae4646b2a 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -57,6 +57,7 @@
#include "mongo/db/security.h"
#include "mongo/db/stats/counters.h"
#include "mongo/s/d_logic.h"
+#include "mongo/s/util.h" // for SendStaleConfigException
#include "mongo/util/file_allocator.h"
#include "mongo/util/goodies.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/ops/query.cpp b/src/mongo/db/ops/query.cpp
index 8ce42cf4e68..a9ef07bc1bd 100644
--- a/src/mongo/db/ops/query.cpp
+++ b/src/mongo/db/ops/query.cpp
@@ -17,19 +17,22 @@
*/
#include "pch.h"
-#include "query.h"
-#include "../pdfile.h"
-#include "../clientcursor.h"
-#include "../oplog.h"
-#include "../../bson/util/builder.h"
-#include "../replutil.h"
-#include "../scanandorder.h"
-#include "../commands.h"
-#include "../queryoptimizer.h"
-#include "../../s/d_logic.h"
-#include "../../server.h"
-#include "../queryoptimizercursor.h"
-#include "../pagefault.h"
+
+#include "mongo/db/ops/query.h"
+
+#include "mongo/bson/util/builder.h"
+#include "mongo/db/clientcursor.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/oplog.h"
+#include "mongo/db/pagefault.h"
+#include "mongo/db/pdfile.h"
+#include "mongo/db/queryoptimizer.h"
+#include "mongo/db/queryoptimizercursor.h"
+#include "mongo/db/replutil.h"
+#include "mongo/db/scanandorder.h"
+#include "mongo/s/d_logic.h"
+#include "mongo/s/util.h" // for SendStaleConfigException
+#include "mongo/server.h"
namespace mongo {
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 4e932069dbf..490e8118980 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -22,6 +22,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/pipeline/document.h"
#include "mongo/s/d_logic.h"
+#include "mongo/s/util.h" // for SendStaleConfigException
namespace mongo {
diff --git a/src/mongo/dbtests/config_upgrade_tests.cpp b/src/mongo/dbtests/config_upgrade_tests.cpp
index 0ef506d357b..78891ae4d00 100644
--- a/src/mongo/dbtests/config_upgrade_tests.cpp
+++ b/src/mongo/dbtests/config_upgrade_tests.cpp
@@ -15,6 +15,7 @@
*/
#include "mongo/dbtests/config_server_fixture.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/cluster_client_internal.h"
#include "mongo/s/config_upgrade.h"
#include "mongo/s/type_mongos.h"
@@ -22,7 +23,6 @@
#include "mongo/s/type_chunk.h"
#include "mongo/s/type_shard.h"
#include "mongo/s/type_config_version.h"
-#include "mongo/s/util.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/version.h"
@@ -92,7 +92,7 @@ namespace mongo {
BSONObjBuilder bob;
bob << CollectionType::ns(ns);
- bob << CollectionType::DEPRECATED_lastmod(ShardChunkVersion(1, 0, OID()).toLong());
+ bob << CollectionType::DEPRECATED_lastmod(ChunkVersion(1, 0, OID()).toLong());
bob << CollectionType::keyPattern(BSON("_id" << 1));
if (epochForCollection) {
bob << CollectionType::DEPRECATED_lastmodEpoch(epoch);
@@ -108,7 +108,7 @@ namespace mongo {
bob << ChunkType::min(BSON("_id" << i));
bob << ChunkType::max(BSON("_id" << (i + 1)));
bob << ChunkType::shard("test");
- bob << ChunkType::DEPRECATED_lastmod(ShardChunkVersion(i + 1, 0, OID()).toLong());
+ bob << ChunkType::DEPRECATED_lastmod(ChunkVersion(i + 1, 0, OID()).toLong());
// Make sure the first chunk never has an epoch, so we can be sure there's something
// to upgrade
diff --git a/src/mongo/dbtests/d_chunk_manager_tests.cpp b/src/mongo/dbtests/d_chunk_manager_tests.cpp
index f7b6a470d76..e50961d3ee7 100644
--- a/src/mongo/dbtests/d_chunk_manager_tests.cpp
+++ b/src/mongo/dbtests/d_chunk_manager_tests.cpp
@@ -19,6 +19,7 @@
#include "pch.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/d_chunk_manager.h"
#include "mongo/s/type_chunk.h"
#include "mongo/s/type_collection.h"
@@ -215,7 +216,7 @@ namespace {
// new chunk [20,0-30,0)
BSONObj min = BSON( "a" << 20 << "b" << 0 );
BSONObj max = BSON( "a" << 30 << "b" << 0 );
- ShardChunkManagerPtr cloned( s.clonePlus( min , max , ShardChunkVersion( 1, 0, OID() ) /* TODO test version */ ) );
+ ShardChunkManagerPtr cloned( s.clonePlus( min , max , ChunkVersion( 1, 0, OID() ) /* TODO test version */ ) );
BSONObj k1 = BSON( "a" << 5 << "b" << 0 );
ASSERT( ! cloned->belongsToMe( k1 ) );
@@ -248,7 +249,7 @@ namespace {
// [15,0-25,0) overlaps [10,0-20,0)
BSONObj min = BSON( "a" << 15 << "b" << 0 );
BSONObj max = BSON( "a" << 25 << "b" << 0 );
- ASSERT_THROWS( s.clonePlus ( min , max , ShardChunkVersion( 1, 0, OID() ) /* TODO test version */ ) , UserException );
+ ASSERT_THROWS( s.clonePlus ( min , max , ChunkVersion( 1, 0, OID() ) /* TODO test version */ ) , UserException );
}
};
@@ -276,7 +277,7 @@ namespace {
// deleting chunk [10,0-20,0)
BSONObj min = BSON( "a" << 10 << "b" << 0 );
BSONObj max = BSON( "a" << 20 << "b" << 0 );
- ShardChunkManagerPtr cloned( s.cloneMinus( min , max , ShardChunkVersion( 1, 0, OID() ) /* TODO test version */ ) );
+ ShardChunkManagerPtr cloned( s.cloneMinus( min , max , ChunkVersion( 1, 0, OID() ) /* TODO test version */ ) );
BSONObj k1 = BSON( "a" << 5 << "b" << 0 );
ASSERT( ! cloned->belongsToMe( k1 ) );
@@ -315,13 +316,13 @@ namespace {
// deleting non-existing chunk [25,0-28,0)
BSONObj min1 = BSON( "a" << 25 << "b" << 0 );
BSONObj max1 = BSON( "a" << 28 << "b" << 0 );
- ASSERT_THROWS( s.cloneMinus( min1 , max1 , ShardChunkVersion( 1, 0, OID() ) /* TODO test version */ ) , UserException );
+ ASSERT_THROWS( s.cloneMinus( min1 , max1 , ChunkVersion( 1, 0, OID() ) /* TODO test version */ ) , UserException );
// deletin an overlapping range (not exactly a chunk) [15,0-25,0)
BSONObj min2 = BSON( "a" << 15 << "b" << 0 );
BSONObj max2 = BSON( "a" << 25 << "b" << 0 );
- ASSERT_THROWS( s.cloneMinus( min2 , max2 , ShardChunkVersion( 1, 0, OID() ) /* TODO test version */ ) , UserException );
+ ASSERT_THROWS( s.cloneMinus( min2 , max2 , ChunkVersion( 1, 0, OID() ) /* TODO test version */ ) , UserException );
}
};
@@ -349,7 +350,7 @@ namespace {
vector<BSONObj> splitKeys;
splitKeys.push_back( split1 );
splitKeys.push_back( split2 );
- ShardChunkVersion version( 1 , 99, OID() ); // first chunk 1|99 , second 1|100
+ ChunkVersion version( 1 , 99, OID() ); // first chunk 1|99 , second 1|100
ShardChunkManagerPtr cloned( s.cloneSplit( min , max , splitKeys , version ) );
version.incMinor(); /* second chunk 1|100, first split point */
@@ -386,13 +387,13 @@ namespace {
BSONObj badSplit = BSON( "a" << 5 << "b" << 0 );
vector<BSONObj> splitKeys;
splitKeys.push_back( badSplit );
- ASSERT_THROWS( s.cloneSplit( min , max , splitKeys , ShardChunkVersion( 1, 0, OID() ) ) , UserException );
+ ASSERT_THROWS( s.cloneSplit( min , max , splitKeys , ChunkVersion( 1, 0, OID() ) ) , UserException );
BSONObj badMax = BSON( "a" << 25 << "b" << 0 );
BSONObj split = BSON( "a" << 15 << "b" << 0 );
splitKeys.clear();
splitKeys.push_back( split );
- ASSERT_THROWS( s.cloneSplit( min , badMax, splitKeys , ShardChunkVersion( 1, 0, OID() ) ) , UserException );
+ ASSERT_THROWS( s.cloneSplit( min , badMax, splitKeys , ChunkVersion( 1, 0, OID() ) ) , UserException );
}
};
@@ -410,7 +411,7 @@ namespace {
// shard can have zero chunks for an existing collection
// version should be 0, though
ShardChunkManager s( collection , chunks );
- ASSERT_EQUALS( s.getVersion().toLong() , ShardChunkVersion( 0, 0, OID() ).toLong() );
+ ASSERT_EQUALS( s.getVersion().toLong() , ChunkVersion( 0, 0, OID() ).toLong() );
ASSERT_EQUALS( s.getNumChunks() , 0u );
}
};
@@ -435,17 +436,17 @@ namespace {
BSONObj max = BSON( "a" << 20 );
// if we remove the only chunk, the only version accepted is 0
- ShardChunkVersion nonZero = ShardChunkVersion( 99, 0, OID() );
+ ChunkVersion nonZero = ChunkVersion( 99, 0, OID() );
ASSERT_THROWS( s.cloneMinus( min , max , nonZero ) , UserException );
- ShardChunkManagerPtr empty( s.cloneMinus( min , max , ShardChunkVersion( 0, 0, OID() ) ) );
- ASSERT_EQUALS( empty->getVersion().toLong() , ShardChunkVersion( 0, 0, OID() ).toLong() );
+ ShardChunkManagerPtr empty( s.cloneMinus( min , max , ChunkVersion( 0, 0, OID() ) ) );
+ ASSERT_EQUALS( empty->getVersion().toLong() , ChunkVersion( 0, 0, OID() ).toLong() );
ASSERT_EQUALS( empty->getNumChunks() , 0u );
BSONObj k = BSON( "a" << 15 << "b" << 0 );
ASSERT( ! empty->belongsToMe( k ) );
// we can add a chunk to an empty manager
// version should be provided
- ASSERT_THROWS( empty->clonePlus( min , max , ShardChunkVersion( 0, 0, OID() ) ) , UserException );
+ ASSERT_THROWS( empty->clonePlus( min , max , ChunkVersion( 0, 0, OID() ) ) , UserException );
ShardChunkManagerPtr cloned( empty->clonePlus( min , max , nonZero ) );
ASSERT_EQUALS( cloned->getVersion().toLong(), nonZero.toLong() );
ASSERT_EQUALS( cloned->getNumChunks() , 1u );
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
index 4a702ca7b4e..f5e43be7687 100644
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
@@ -23,6 +23,7 @@
#include "mongo/client/dbclientmockcursor.h"
#include "mongo/client/parallel.h"
#include "mongo/s/chunk_diff.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/type_chunk.h"
namespace ShardingTests {
@@ -59,77 +60,6 @@ namespace ShardingTests {
return max > 0 ? r % max : r;
}
- /**
- * Tests parsing of BSON for versions. In version 2.2, this parsing is meant to be very
- * flexible so different formats can be tried and enforced later.
- *
- * Formats are:
- *
- * A) { vFieldName : <TSTYPE>, [ vFieldNameEpoch : <OID> ], ... }
- * B) { fieldName : [ <TSTYPE>, <OID> ], ... }
- *
- * vFieldName is a specifyable name - usually "version" (default) or "lastmod". <TSTYPE> is a
- * type convertible to Timestamp, ideally Timestamp but also numeric.
- * <OID> is a value of type OID.
- *
- */
- class ShardVersionParsingTest {
- public:
- void run(){
-
- {
- // Legacy compatibility format (A)
-
- BSONObjBuilder versionObjB;
- versionObjB.appendTimestamp( "testVersion",
- ShardChunkVersion( 1, 1, OID() ).toLong() );
- versionObjB.append( "testVersionEpoch", OID::gen() );
- BSONObj versionObj = versionObjB.obj();
-
- ShardChunkVersion parsed =
- ShardChunkVersion::fromBSON( versionObj[ "testVersion" ] );
-
- ASSERT( ShardChunkVersion::canParseBSON( versionObj[ "testVersion" ] ) );
- ASSERT( parsed.majorVersion() == 1 );
- ASSERT( parsed.minorVersion() == 1 );
- ASSERT( ! parsed.epoch().isSet() );
-
- parsed = ShardChunkVersion::fromBSON( versionObj, "testVersion" );
-
- ASSERT( ShardChunkVersion::canParseBSON( versionObj, "testVersion" ) );
- ASSERT( parsed.majorVersion() == 1 );
- ASSERT( parsed.minorVersion() == 1 );
- ASSERT( parsed.epoch().isSet() );
- }
-
- {
- // Sub-array format (B)
-
- BSONObjBuilder tsObjB;
- tsObjB.appendTimestamp( "ts", ShardChunkVersion( 1, 1, OID() ).toLong() );
- BSONObj tsObj = tsObjB.obj();
-
- BSONObjBuilder versionObjB;
- BSONArrayBuilder subArrB( versionObjB.subarrayStart( "testVersion" ) );
- // Append this weird way so we're sure we get a timestamp type
- subArrB.append( tsObj.firstElement() );
- subArrB.append( OID::gen() );
- subArrB.done();
- BSONObj versionObj = versionObjB.obj();
-
- ShardChunkVersion parsed =
- ShardChunkVersion::fromBSON( versionObj[ "testVersion" ] );
-
- ASSERT( ShardChunkVersion::canParseBSON( versionObj[ "testVersion" ] ) );
- ASSERT( ShardChunkVersion::canParseBSON( BSONArray( versionObj[ "testVersion" ].Obj() ) ) );
- ASSERT( parsed.majorVersion() == 1 );
- ASSERT( parsed.minorVersion() == 1 );
- ASSERT( parsed.epoch().isSet() );
- }
- }
-
- };
-
//
// Sets up a basic environment for loading chunks to/from the direct database connection
// Redirects connections to the direct database for the duration of the test.
@@ -208,8 +138,8 @@ namespace ShardingTests {
ASSERT(firstChunk[ChunkType::min()].Obj()[ "_id" ].type() == MinKey );
ASSERT(firstChunk[ChunkType::max()].Obj()[ "_id" ].type() == MaxKey );
- ShardChunkVersion version = ShardChunkVersion::fromBSON(firstChunk,
- ChunkType::DEPRECATED_lastmod());
+ ChunkVersion version = ChunkVersion::fromBSON(firstChunk,
+ ChunkType::DEPRECATED_lastmod());
ASSERT( version.majorVersion() == 1 );
ASSERT( version.minorVersion() == 0 );
@@ -271,8 +201,8 @@ namespace ShardingTests {
BSONObj chunk = cursor->next();
- ShardChunkVersion version = ShardChunkVersion::fromBSON(chunk,
- ChunkType::DEPRECATED_lastmod());
+ ChunkVersion version = ChunkVersion::fromBSON(chunk,
+ ChunkType::DEPRECATED_lastmod());
ASSERT( version.majorVersion() == 1 );
ASSERT( version.isEpochSet() );
@@ -305,8 +235,8 @@ namespace ShardingTests {
BSONObj firstChunk = client().findOne(ChunkType::ConfigNS, BSONObj()).getOwned();
- ShardChunkVersion version = ShardChunkVersion::fromBSON(firstChunk,
- ChunkType::DEPRECATED_lastmod());
+ ChunkVersion version = ChunkVersion::fromBSON(firstChunk,
+ ChunkType::DEPRECATED_lastmod());
// Make manager load existing chunks
ChunkManagerPtr manager( new ChunkManager( collName(), ShardKeyPattern( BSON( "_id" << 1 ) ), false ) );
@@ -318,7 +248,7 @@ namespace ShardingTests {
// Modify chunks collection
BSONObjBuilder b;
- ShardChunkVersion laterVersion = ShardChunkVersion( 2, 1, version.epoch() );
+ ChunkVersion laterVersion = ChunkVersion( 2, 1, version.epoch() );
laterVersion.addToBSON(b, ChunkType::DEPRECATED_lastmod());
client().update(ChunkType::ConfigNS, BSONObj(), BSON( "$set" << b.obj()));
@@ -340,7 +270,7 @@ namespace ShardingTests {
bool _inverse;
typedef map<BSONObj, BSONObj, BSONObjCmp> RangeMap;
- typedef map<string, ShardChunkVersion> VersionMap;
+ typedef map<string, ChunkVersion> VersionMap;
ChunkDiffUnitTest( bool inverse ) : _inverse( inverse ) {}
@@ -381,20 +311,20 @@ namespace ShardingTests {
};
// Allow validating with and without ranges (b/c our splits won't actually be updated by the diffs)
- void validate( BSONArray chunks, ShardChunkVersion maxVersion, const VersionMap& maxShardVersions ){
+ void validate( BSONArray chunks, ChunkVersion maxVersion, const VersionMap& maxShardVersions ){
validate( chunks, NULL, maxVersion, maxShardVersions );
}
- void validate( BSONArray chunks, const RangeMap& ranges, ShardChunkVersion maxVersion, const VersionMap& maxShardVersions ){
+ void validate( BSONArray chunks, const RangeMap& ranges, ChunkVersion maxVersion, const VersionMap& maxShardVersions ){
validate( chunks, (RangeMap*)&ranges, maxVersion, maxShardVersions );
}
// Validates that the ranges and versions are valid given the chunks
- void validate( const BSONArray& chunks, RangeMap* ranges, ShardChunkVersion maxVersion, const VersionMap& maxShardVersions ){
+ void validate( const BSONArray& chunks, RangeMap* ranges, ChunkVersion maxVersion, const VersionMap& maxShardVersions ){
BSONObjIterator it( chunks );
int chunkCount = 0;
- ShardChunkVersion foundMaxVersion;
+ ChunkVersion foundMaxVersion;
VersionMap foundMaxShardVersions;
//
@@ -416,11 +346,11 @@ namespace ShardingTests {
ASSERT( chunkRange->second.woCompare( _inverse ? chunkDoc["min"].Obj() : chunkDoc["max"].Obj() ) == 0 );
}
- ShardChunkVersion version =
- ShardChunkVersion::fromBSON(chunkDoc[ChunkType::DEPRECATED_lastmod()]);
+ ChunkVersion version =
+ ChunkVersion::fromBSON(chunkDoc[ChunkType::DEPRECATED_lastmod()]);
if( version > foundMaxVersion ) foundMaxVersion = version;
- ShardChunkVersion shardMaxVersion =
+ ChunkVersion shardMaxVersion =
foundMaxShardVersions[chunkDoc[ChunkType::shard()].String()];
if( version > shardMaxVersion ) {
foundMaxShardVersions[chunkDoc[ChunkType::shard()].String() ] = version;
@@ -437,7 +367,7 @@ namespace ShardingTests {
for( VersionMap::iterator it = foundMaxShardVersions.begin(); it != foundMaxShardVersions.end(); it++ ){
- ShardChunkVersion foundVersion = it->second;
+ ChunkVersion foundVersion = it->second;
VersionMap::const_iterator maxIt = maxShardVersions.find( it->first );
ASSERT( maxIt != maxShardVersions.end() );
@@ -457,7 +387,7 @@ namespace ShardingTests {
BSONArrayBuilder chunksB;
BSONObj lastSplitPt;
- ShardChunkVersion version( 1, 0, OID() );
+ ChunkVersion version( 1, 0, OID() );
//
// Generate numChunks with a given key size over numShards
@@ -505,7 +435,7 @@ namespace ShardingTests {
// Setup the empty ranges and versions first
RangeMap ranges;
- ShardChunkVersion maxVersion = ShardChunkVersion( 0, 0, OID() );
+ ChunkVersion maxVersion = ChunkVersion( 0, 0, OID() );
VersionMap maxShardVersions;
// Create a differ which will track our progress
@@ -669,7 +599,7 @@ namespace ShardingTests {
if( rand( 10 ) < 1 ){
diffs = chunks;
ranges.clear();
- maxVersion = ShardChunkVersion( 0, 0, OID() );
+ maxVersion = ChunkVersion( 0, 0, OID() );
maxShardVersions.clear();
}
@@ -703,7 +633,6 @@ namespace ShardingTests {
void setupTests() {
add< serverandquerytests::test1 >();
- add< ShardVersionParsingTest >();
add< ChunkManagerCreateBasicTest >();
add< ChunkManagerCreateFullTest >();
add< ChunkManagerLoadBasicTest >();
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index cbbbac8c315..07b2ec020d4 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -6,7 +6,8 @@ Import("env")
# Schema and backward compatibility code for "config" collections.
#
-env.StaticLibrary('base', ['field_parser.cpp',
+env.StaticLibrary('base', [#'chunk_version.cpp',
+ 'field_parser.cpp',
'mongo_version_range.cpp',
'type_changelog.cpp',
'type_chunk.cpp',
@@ -22,6 +23,8 @@ env.StaticLibrary('base', ['field_parser.cpp',
LIBDEPS=['$BUILD_DIR/mongo/base/base',
'$BUILD_DIR/mongo/bson'])
+env.CppUnitTest('chunk_version_test', 'chunk_version_test.cpp', LIBDEPS=['base'])
+
env.CppUnitTest('field_parser_test', 'field_parser_test.cpp', LIBDEPS=['base'])
env.CppUnitTest('mongo_version_range_test', 'mongo_version_range_test.cpp',
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index d13985258e8..cbdfde14357 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -25,6 +25,7 @@
#include "mongo/db/queryutil.h"
#include "mongo/platform/random.h"
#include "mongo/s/chunk_diff.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/client_info.h"
#include "mongo/s/config.h"
#include "mongo/s/cursors.h"
@@ -61,7 +62,7 @@ namespace mongo {
string ns = from.getStringField(ChunkType::ns().c_str());
_shard.reset(from.getStringField(ChunkType::shard().c_str()));
- _lastmod = ShardChunkVersion::fromBSON(from[ChunkType::DEPRECATED_lastmod()]);
+ _lastmod = ChunkVersion::fromBSON(from[ChunkType::DEPRECATED_lastmod()]);
verify( _lastmod.isSet() );
_min = from.getObjectField(ChunkType::min().c_str()).getOwned();
@@ -78,7 +79,7 @@ namespace mongo {
uassert( 10173 , "Chunk needs a max" , ! _max.isEmpty() );
}
- Chunk::Chunk(const ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard, ShardChunkVersion lastmod)
+ Chunk::Chunk(const ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard, ChunkVersion lastmod)
: _manager(info), _min(min), _max(max), _shard(shard), _lastmod(lastmod), _jumbo(false), _dataWritten(mkDataWritten())
{}
@@ -485,7 +486,7 @@ namespace mongo {
return _min.woCompare( s._min ) == 0 && _max.woCompare( s._max ) == 0;
}
- void Chunk::serialize(BSONObjBuilder& to,ShardChunkVersion myLastMod) {
+ void Chunk::serialize(BSONObjBuilder& to,ChunkVersion myLastMod) {
to.append( "_id" , genID( _manager->getns() , _min ) );
@@ -621,7 +622,7 @@ namespace mongo {
verify( _ns != "" );
verify( ! _key.key().isEmpty() );
- _version = ShardChunkVersion::fromBSON( collDoc );
+ _version = ChunkVersion::fromBSON( collDoc );
}
ChunkManager::ChunkManager( ChunkManagerPtr oldManager ) :
@@ -739,8 +740,8 @@ namespace mongo {
{
// Reset the max version, but not the epoch, when we aren't loading from the oldManager
- _version = ShardChunkVersion( 0, _version.epoch() );
- set<ShardChunkVersion> minorVersions;
+ _version = ChunkVersion( 0, _version.epoch() );
+ set<ChunkVersion> minorVersions;
// If we have a previous version of the ChunkManager to work from, use that info to reduce
// our config query
@@ -803,7 +804,7 @@ namespace mongo {
// Set all our data to empty
chunkMap.clear();
shardVersions.clear();
- _version = ShardChunkVersion( 0, OID() );
+ _version = ChunkVersion( 0, OID() );
return true;
}
@@ -827,7 +828,7 @@ namespace mongo {
// Set all our data to empty to be extra safe
chunkMap.clear();
shardVersions.clear();
- _version = ShardChunkVersion( 0, OID() );
+ _version = ChunkVersion( 0, OID() );
return allInconsistent;
}
@@ -838,15 +839,15 @@ namespace mongo {
return grid.getDBConfig(getns())->getChunkManager(getns(), force);
}
- void ChunkManager::markMinorForReload( ShardChunkVersion majorVersion ) const {
+ void ChunkManager::markMinorForReload( ChunkVersion majorVersion ) const {
_splitHeuristics.markMinorForReload( getns(), majorVersion );
}
- void ChunkManager::getMarkedMinorVersions( set<ShardChunkVersion>& minorVersions ) const {
+ void ChunkManager::getMarkedMinorVersions( set<ChunkVersion>& minorVersions ) const {
_splitHeuristics.getMarkedMinorVersions( minorVersions );
}
- void ChunkManager::SplitHeuristics::markMinorForReload( const string& ns, ShardChunkVersion majorVersion ) {
+ void ChunkManager::SplitHeuristics::markMinorForReload( const string& ns, ChunkVersion majorVersion ) {
// When we get a stale minor version, it means that some *other* mongos has just split a
// chunk into a number of smaller parts, so we shouldn't need reload the data needed to
@@ -884,9 +885,9 @@ namespace mongo {
grid.getDBConfig( ns )->getChunkManagerIfExists( ns, true, true );
}
- void ChunkManager::SplitHeuristics::getMarkedMinorVersions( set<ShardChunkVersion>& minorVersions ) {
+ void ChunkManager::SplitHeuristics::getMarkedMinorVersions( set<ChunkVersion>& minorVersions ) {
scoped_lock lk( _staleMinorSetMutex );
- for( set<ShardChunkVersion>::iterator it = _staleMinorSet.begin(); it != _staleMinorSet.end(); it++ ){
+ for( set<ChunkVersion>::iterator it = _staleMinorSet.begin(); it != _staleMinorSet.end(); it++ ){
minorVersions.insert( *it );
}
}
@@ -993,7 +994,7 @@ namespace mongo {
&splitPoints, &shards );
// this is the first chunk; start the versioning from scratch
- ShardChunkVersion version;
+ ChunkVersion version;
version.incEpoch();
version.incMajor();
@@ -1038,7 +1039,7 @@ namespace mongo {
conn->done();
- _version = ShardChunkVersion( 0, version.epoch() );
+ _version = ChunkVersion( 0, version.epoch() );
}
ChunkPtr ChunkManager::findIntersectingChunk( const BSONObj& point ) const {
@@ -1269,7 +1270,7 @@ namespace mongo {
if ( ! setShardVersion( conn->conn(),
_ns,
- ShardChunkVersion( 0, OID() ),
+ ChunkVersion( 0, OID() ),
ChunkManagerPtr(),
true, res ) )
{
@@ -1284,14 +1285,14 @@ namespace mongo {
configServer.logChange( "dropCollection" , _ns , BSONObj() );
}
- ShardChunkVersion ChunkManager::getVersion( const Shard& shard ) const {
+ ChunkVersion ChunkManager::getVersion( const Shard& shard ) const {
ShardVersionMap::const_iterator i = _shardVersions.find( shard );
if ( i == _shardVersions.end() )
- return ShardChunkVersion( 0, OID() );
+ return ChunkVersion( 0, OID() );
return i->second;
}
- ShardChunkVersion ChunkManager::getVersion() const {
+ ChunkVersion ChunkManager::getVersion() const {
return _version;
}
@@ -1416,12 +1417,12 @@ namespace mongo {
class ChunkObjUnitTest : public StartupTest {
public:
- void runShardChunkVersion() {
- vector<ShardChunkVersion> all;
- all.push_back( ShardChunkVersion(1,1, OID()) );
- all.push_back( ShardChunkVersion(1,2, OID()) );
- all.push_back( ShardChunkVersion(2,1, OID()) );
- all.push_back( ShardChunkVersion(2,2, OID()) );
+ void runChunkVersion() {
+ vector<ChunkVersion> all;
+ all.push_back( ChunkVersion(1,1, OID()) );
+ all.push_back( ChunkVersion(1,2, OID()) );
+ all.push_back( ChunkVersion(2,1, OID()) );
+ all.push_back( ChunkVersion(2,2, OID()) );
for ( unsigned i=0; i<all.size(); i++ ) {
for ( unsigned j=i+1; j<all.size(); j++ ) {
@@ -1432,7 +1433,7 @@ namespace mongo {
}
void run() {
- runShardChunkVersion();
+ runChunkVersion();
LOG(1) << "shardObjTest passed" << endl;
}
} shardObjTest;
@@ -1446,7 +1447,7 @@ namespace mongo {
// its name and through the 'setShardVersion' command call
bool setShardVersion( DBClientBase & conn,
const string& ns,
- ShardChunkVersion version,
+ ChunkVersion version,
ChunkManagerPtr manager, // Used only for reporting!
bool authoritative ,
BSONObj& result )
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
index 87a9642dafb..4bc366eb53d 100644
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
@@ -20,9 +20,9 @@
#include "mongo/bson/util/atomic_int.h"
#include "mongo/client/distlock.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/shard.h"
#include "mongo/s/shardkey.h"
-#include "mongo/s/util.h"
#include "mongo/util/concurrency/ticketholder.h"
namespace mongo {
@@ -55,13 +55,13 @@ namespace mongo {
const BSONObj& min,
const BSONObj& max,
const Shard& shard,
- ShardChunkVersion lastmod = ShardChunkVersion() );
+ ChunkVersion lastmod = ChunkVersion() );
//
// serialization support
//
- void serialize(BSONObjBuilder& to, ShardChunkVersion myLastMod=ShardChunkVersion(0,OID()));
+ void serialize(BSONObjBuilder& to, ChunkVersion myLastMod=ChunkVersion(0,OID()));
//
// chunk boundary support
@@ -90,8 +90,8 @@ namespace mongo {
void appendShortVersion( const char * name , BSONObjBuilder& b ) const;
- ShardChunkVersion getLastmod() const { return _lastmod; }
- void setLastmod( ShardChunkVersion v ) { _lastmod = v; }
+ ChunkVersion getLastmod() const { return _lastmod; }
+ void setLastmod( ChunkVersion v ) { _lastmod = v; }
//
// split support
@@ -211,7 +211,7 @@ namespace mongo {
BSONObj _min;
BSONObj _max;
Shard _shard;
- ShardChunkVersion _lastmod;
+ ChunkVersion _lastmod;
mutable bool _jumbo;
// transient stuff
@@ -319,7 +319,7 @@ namespace mongo {
*/
class ChunkManager {
public:
- typedef map<Shard,ShardChunkVersion> ShardVersionMap;
+ typedef map<Shard,ChunkVersion> ShardVersionMap;
// Loads a new chunk manager from a collection document
ChunkManager( const BSONObj& collDoc );
@@ -410,8 +410,8 @@ namespace mongo {
string toString() const;
- ShardChunkVersion getVersion( const Shard& shard ) const;
- ShardChunkVersion getVersion() const;
+ ChunkVersion getVersion( const Shard& shard ) const;
+ ChunkVersion getVersion() const;
void getInfo( BSONObjBuilder& b ) const;
@@ -426,8 +426,8 @@ namespace mongo {
ChunkManagerPtr reload(bool force=true) const; // doesn't modify self!
- void markMinorForReload( ShardChunkVersion majorVersion ) const;
- void getMarkedMinorVersions( set<ShardChunkVersion>& minorVersions ) const;
+ void markMinorForReload( ChunkVersion majorVersion ) const;
+ void getMarkedMinorVersions( set<ChunkVersion>& minorVersions ) const;
private:
@@ -453,7 +453,7 @@ namespace mongo {
const ShardVersionMap _shardVersions; // max version per shard
// max version of any chunk
- ShardChunkVersion _version;
+ ChunkVersion _version;
// the previous manager this was based on
// cleared after loading chunks
@@ -476,8 +476,8 @@ namespace mongo {
_staleMinorSetMutex( "SplitHeuristics::staleMinorSet" ),
_staleMinorCount( 0 ) {}
- void markMinorForReload( const string& ns, ShardChunkVersion majorVersion );
- void getMarkedMinorVersions( set<ShardChunkVersion>& minorVersions );
+ void markMinorForReload( const string& ns, ChunkVersion majorVersion );
+ void getMarkedMinorVersions( set<ChunkVersion>& minorVersions );
TicketHolder _splitTickets;
@@ -485,7 +485,7 @@ namespace mongo {
// mutex protects below
int _staleMinorCount;
- set<ShardChunkVersion> _staleMinorSet;
+ set<ChunkVersion> _staleMinorSet;
// Test whether we should split once data * splitTestFactor > chunkSize (approximately)
static const int splitTestFactor = 5;
@@ -553,7 +553,7 @@ namespace mongo {
bool setShardVersion( DBClientBase & conn,
const string& ns,
- ShardChunkVersion version,
+ ChunkVersion version,
ChunkManagerPtr manager,
bool authoritative,
BSONObj& result );
diff --git a/src/mongo/s/chunk_diff.h b/src/mongo/s/chunk_diff.h
index a0ed9dd31d6..be9fbc79339 100644
--- a/src/mongo/s/chunk_diff.h
+++ b/src/mongo/s/chunk_diff.h
@@ -18,12 +18,11 @@
#pragma once
-#include "util.h"
-#include "../bson/bsonobj.h"
-#include "../client/dbclientcursor.h"
-#include "../client/connpool.h"
-
+#include "mongo/bson/bsonobj.h"
+#include "mongo/client/dbclientcursor.h"
+#include "mongo/client/connpool.h"
#include "mongo/s/chunk.h"
+#include "mongo/s/chunk_version.h"
namespace mongo {
@@ -66,8 +65,8 @@ namespace mongo {
*/
void attach( const string& ns,
RangeMap& currMap,
- ShardChunkVersion& maxVersion,
- map<ShardType, ShardChunkVersion>& maxShardVersions )
+ ChunkVersion& maxVersion,
+ map<ShardType, ChunkVersion>& maxShardVersions )
{
_ns = ns;
_currMap = &currMap;
@@ -136,7 +135,7 @@ namespace mongo {
// Returns the number of diffs processed, or -1 if the diffs were inconsistent
// Throws a DBException on connection errors
int calculateConfigDiff( string config,
- const set<ShardChunkVersion>& extraMinorVersions = set<ShardChunkVersion>() );
+ const set<ChunkVersion>& extraMinorVersions = set<ChunkVersion>() );
// Applies changes to the config data from a cursor passed in
// Returns the number of diffs processed, or -1 if the diffs were inconsistent
@@ -145,14 +144,14 @@ namespace mongo {
// Returns the query needed to find new changes to a collection from the config server
// Needed only if a custom connection is required to the config server
- Query configDiffQuery( const set<ShardChunkVersion>& extraMinorVersions = set<ShardChunkVersion>() ) const;
+ Query configDiffQuery( const set<ChunkVersion>& extraMinorVersions = set<ChunkVersion>() ) const;
private:
string _ns;
RangeMap* _currMap;
- ShardChunkVersion* _maxVersion;
- map<ShardType, ShardChunkVersion>* _maxShardVersions;
+ ChunkVersion* _maxVersion;
+ map<ShardType, ChunkVersion>* _maxShardVersions;
// Store for later use
int _validDiffs;
diff --git a/src/mongo/s/chunk_diff.hpp b/src/mongo/s/chunk_diff.hpp
index 319a426cbea..0206851d26e 100644
--- a/src/mongo/s/chunk_diff.hpp
+++ b/src/mongo/s/chunk_diff.hpp
@@ -19,6 +19,7 @@
#pragma once
#include "mongo/s/chunk_diff.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/type_chunk.h"
namespace mongo {
@@ -75,7 +76,7 @@ namespace mongo {
template < class ValType, class ShardType >
int ConfigDiffTracker<ValType,ShardType>::
calculateConfigDiff( string config,
- const set<ShardChunkVersion>& extraMinorVersions )
+ const set<ChunkVersion>& extraMinorVersions )
{
verifyAttached();
@@ -130,7 +131,7 @@ namespace mongo {
BSONObj diffChunkDoc = diffCursor.next();
- ShardChunkVersion chunkVersion = ShardChunkVersion::fromBSON(diffChunkDoc, ChunkType::DEPRECATED_lastmod());
+ ChunkVersion chunkVersion = ChunkVersion::fromBSON(diffChunkDoc, ChunkType::DEPRECATED_lastmod());
if( diffChunkDoc[ChunkType::min()].type() != Object ||
diffChunkDoc[ChunkType::max()].type() != Object ||
@@ -145,7 +146,7 @@ namespace mongo {
warning() << "got invalid chunk version " << chunkVersion << " in document " << diffChunkDoc
<< " when trying to load differing chunks at version "
- << ShardChunkVersion( _maxVersion->toLong(), currEpoch ) << endl;
+ << ChunkVersion( _maxVersion->toLong(), currEpoch ) << endl;
// Don't keep loading, since we know we'll be broken here
return -1;
@@ -158,7 +159,7 @@ namespace mongo {
// Chunk version changes
ShardType shard = shardFor( diffChunkDoc[ChunkType::shard()].String() );
- typename map<ShardType, ShardChunkVersion>::iterator shardVersionIt = _maxShardVersions->find( shard );
+ typename map<ShardType, ChunkVersion>::iterator shardVersionIt = _maxShardVersions->find( shard );
if( shardVersionIt == _maxShardVersions->end() || shardVersionIt->second < chunkVersion ){
(*_maxShardVersions)[ shard ] = chunkVersion;
}
@@ -198,7 +199,7 @@ namespace mongo {
template < class ValType, class ShardType >
Query ConfigDiffTracker<ValType,ShardType>::
- configDiffQuery( const set<ShardChunkVersion>& extraMinorVersions ) const
+ configDiffQuery( const set<ChunkVersion>& extraMinorVersions ) const
{
verifyAttached();
@@ -246,7 +247,7 @@ namespace mongo {
// Get any shard version changes higher than we know currently
// Needed since there could have been a split of the max version chunk of any shard
// TODO: Ideally, we shouldn't care about these
- for( typename map<ShardType, ShardChunkVersion>::const_iterator it = _maxShardVersions->begin(); it != _maxShardVersions->end(); it++ ){
+ for( typename map<ShardType, ChunkVersion>::const_iterator it = _maxShardVersions->begin(); it != _maxShardVersions->end(); it++ ){
BSONObjBuilder queryShardB( queryOrB.subobjStart() );
queryShardB.append(ChunkType::shard(), nameFrom( it->first ) );
@@ -260,14 +261,14 @@ namespace mongo {
// Get any minor version changes we've marked as interesting
// TODO: Ideally we shouldn't care about these
- for( set<ShardChunkVersion>::const_iterator it = extraMinorVersions.begin(); it != extraMinorVersions.end(); it++ ){
+ for( set<ChunkVersion>::const_iterator it = extraMinorVersions.begin(); it != extraMinorVersions.end(); it++ ){
BSONObjBuilder queryShardB( queryOrB.subobjStart() );
{
BSONObjBuilder ts(queryShardB.subobjStart(ChunkType::DEPRECATED_lastmod()));
ts.appendTimestamp( "$gt", it->toLong() );
ts.appendTimestamp( "$lt",
- ShardChunkVersion( it->majorVersion() + 1, 0, OID() ).toLong() );
+ ChunkVersion( it->majorVersion() + 1, 0, OID() ).toLong() );
ts.done();
}
queryShardB.done();
diff --git a/src/mongo/s/chunk_version.h b/src/mongo/s/chunk_version.h
new file mode 100644
index 00000000000..dfb65c2c3d8
--- /dev/null
+++ b/src/mongo/s/chunk_version.h
@@ -0,0 +1,319 @@
+/**
+* Copyright (C) 2012 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "mongo/db/jsobj.h"
+
+namespace mongo {
+
+ //
+ // ChunkVersions consist of a major/minor version scoped to a version epoch
+ //
+ struct ChunkVersion {
+ union {
+ struct {
+ int _minor;
+ int _major;
+ };
+ unsigned long long _combined;
+ };
+ OID _epoch;
+
+ ChunkVersion() : _minor(0), _major(0), _epoch(OID()) {}
+
+ //
+ // Constructors shouldn't have default parameters here, since it's vital we track from
+ // here on the epochs of versions, even if not used.
+ //
+
+ ChunkVersion( int major, int minor, const OID& epoch )
+ : _minor(minor),_major(major), _epoch(epoch) {
+ }
+
+ ChunkVersion( unsigned long long ll, const OID& epoch )
+ : _combined( ll ), _epoch(epoch) {
+ }
+
+ void inc( bool major ) {
+ if ( major )
+ incMajor();
+ else
+ incMinor();
+ }
+
+ void incMajor() {
+ _major++;
+ _minor = 0;
+ }
+
+ void incMinor() {
+ _minor++;
+ }
+
+ // Incrementing an epoch creates a new, randomly generated identifier
+ void incEpoch() {
+ _epoch = OID::gen();
+ _major = 0;
+ _minor = 0;
+ }
+
+ // Note: this shouldn't be used as a substitute for version except in specific cases -
+ // epochs make versions more complex
+ unsigned long long toLong() const {
+ return _combined;
+ }
+
+ bool isSet() const {
+ return _combined > 0;
+ }
+
+ bool isEpochSet() const {
+ return _epoch.isSet();
+ }
+
+ string toString() const {
+ stringstream ss;
+ // Similar to month/day/year. For the most part when debugging, we care about major
+ // so it's first
+ ss << _major << "|" << _minor << "||" << _epoch;
+ return ss.str();
+ }
+
+ int majorVersion() const { return _major; }
+ int minorVersion() const { return _minor; }
+ OID epoch() const { return _epoch; }
+
+ //
+ // Explicit comparison operators - versions with epochs have non-trivial comparisons.
+ // > < operators do not check epoch cases. Generally if using == we need to handle
+ // more complex cases.
+ //
+
+ bool operator>( const ChunkVersion& otherVersion ) const {
+ return this->_combined > otherVersion._combined;
+ }
+
+ bool operator>=( const ChunkVersion& otherVersion ) const {
+ return this->_combined >= otherVersion._combined;
+ }
+
+ bool operator<( const ChunkVersion& otherVersion ) const {
+ return this->_combined < otherVersion._combined;
+ }
+
+ bool operator<=( const ChunkVersion& otherVersion ) const {
+ return this->_combined <= otherVersion._combined;
+ }
+
+ //
+ // Equivalence comparison types.
+ //
+
+ // Can we write to this data and not have a problem?
+ bool isWriteCompatibleWith( const ChunkVersion& otherVersion ) const {
+ if( ! hasCompatibleEpoch( otherVersion ) ) return false;
+ return otherVersion._major == _major;
+ }
+
+ // Is this the same version?
+ bool isEquivalentTo( const ChunkVersion& otherVersion ) const {
+ if( ! hasCompatibleEpoch( otherVersion ) ) return false;
+ return otherVersion._combined == _combined;
+ }
+
+ // Is this in the same epoch?
+ bool hasCompatibleEpoch( const ChunkVersion& otherVersion ) const {
+ return hasCompatibleEpoch( otherVersion._epoch );
+ }
+
+ bool hasCompatibleEpoch( const OID& otherEpoch ) const {
+ // TODO : Change logic from eras are not-unequal to eras are equal
+ if( otherEpoch.isSet() && _epoch.isSet() && otherEpoch != _epoch ) return false;
+ return true;
+ }
+
+ //
+ // BSON input/output
+ //
+ // The idea here is to make the BSON input style very flexible right now, so we
+ // can then tighten it up in the next version. We can accept either a BSONObject field
+ // with version and epoch, or version and epoch in different fields (either is optional).
+ // In this case, epoch always is stored in a field name of the version field name + "Epoch"
+ //
+
+ //
+ // { version : <TS> } and { version : [<TS>,<OID>] } format
+ //
+
+ static bool canParseBSON( const BSONElement& el, const string& prefix="" ){
+ bool canParse;
+ fromBSON( el, prefix, &canParse );
+ return canParse;
+ }
+
+ static ChunkVersion fromBSON( const BSONElement& el, const string& prefix="" ){
+ bool canParse;
+ return fromBSON( el, prefix, &canParse );
+ }
+
+ static ChunkVersion fromBSON( const BSONElement& el,
+ const string& prefix,
+ bool* canParse )
+ {
+ *canParse = true;
+
+ int type = el.type();
+
+ if( type == Array ){
+ return fromBSON( BSONArray( el.Obj() ), canParse );
+ }
+
+ if( type == jstOID ){
+ return ChunkVersion( 0, 0, el.OID() );
+ }
+
+ if( el.isNumber() ){
+ return ChunkVersion( static_cast<unsigned long long>(el.numberLong()), OID() );
+ }
+
+ if( type == Timestamp || type == Date ){
+ return ChunkVersion( el._numberLong(), OID() );
+ }
+
+ // Note - we used to throw here, we can't anymore b/c debug builds will be unhappy
+ warning() << "can't load version from element type (" << (int)(el.type()) << ") "
+ << el << endl;
+
+ *canParse = false;
+
+ return ChunkVersion( 0, OID() );
+ }
+
+ //
+ // { version : <TS>, versionEpoch : <OID> } object format
+ //
+
+ static bool canParseBSON( const BSONObj& obj, const string& prefix="" ){
+ bool canParse;
+ fromBSON( obj, prefix, &canParse );
+ return canParse;
+ }
+
+ static ChunkVersion fromBSON( const BSONObj& obj, const string& prefix="" ){
+ bool canParse;
+ return fromBSON( obj, prefix, &canParse );
+ }
+
+ static ChunkVersion fromBSON( const BSONObj& obj,
+ const string& prefixIn,
+ bool* canParse )
+ {
+ *canParse = true;
+
+ string prefix = prefixIn;
+ // "version" doesn't have a "cluster constanst" because that field is never
+ // written to the config.
+ if( prefixIn == "" && ! obj[ "version" ].eoo() ){
+ prefix = (string)"version";
+ }
+ // TODO: use ChunkType::DEPRECATED_lastmod()
+ // NOTE: type_chunk.h includes this file
+ else if( prefixIn == "" && ! obj["lastmod"].eoo() ){
+ prefix = (string)"lastmod";
+ }
+
+ ChunkVersion version = fromBSON( obj[ prefix ], prefixIn, canParse );
+
+ if( obj[ prefix + "Epoch" ].type() == jstOID ){
+ version._epoch = obj[ prefix + "Epoch" ].OID();
+ *canParse = true;
+ }
+
+ return version;
+ }
+
+ //
+ // { version : [<TS>, <OID>] } format
+ //
+
+ static bool canParseBSON( const BSONArray& arr ){
+ bool canParse;
+ fromBSON( arr, &canParse );
+ return canParse;
+ }
+
+ static ChunkVersion fromBSON( const BSONArray& arr ){
+ bool canParse;
+ return fromBSON( arr, &canParse );
+ }
+
+ static ChunkVersion fromBSON( const BSONArray& arr,
+ bool* canParse )
+ {
+ *canParse = false;
+
+ ChunkVersion version;
+
+ BSONObjIterator it( arr );
+ if( ! it.more() ) return version;
+
+ version = fromBSON( it.next(), "", canParse );
+ if( ! *canParse ) return version;
+
+ *canParse = true;
+
+ if( ! it.more() ) return version;
+ BSONElement next = it.next();
+ if( next.type() != jstOID ) return version;
+
+ version._epoch = next.OID();
+
+ return version;
+ }
+
+ //
+ // Currently our BSON output is to two different fields, to cleanly work with older
+ // versions that know nothing about epochs.
+ //
+
+ BSONObj toBSON( const string& prefixIn="" ) const {
+ BSONObjBuilder b;
+
+ string prefix = prefixIn;
+ if( prefix == "" ) prefix = "version";
+
+ b.appendTimestamp( prefix, _combined );
+ b.append( prefix + "Epoch", _epoch );
+ return b.obj();
+ }
+
+ void addToBSON( BSONObjBuilder& b, const string& prefix="" ) const {
+ b.appendElements( toBSON( prefix ) );
+ }
+
+ void addEpochToBSON( BSONObjBuilder& b, const string& prefix="" ) const {
+ b.append( prefix + "Epoch", _epoch );
+ }
+
+ };
+
+ inline ostream& operator<<( ostream &s , const ChunkVersion& v) {
+ s << v.toString();
+ return s;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/chunk_version_test.cpp b/src/mongo/s/chunk_version_test.cpp
new file mode 100644
index 00000000000..e64e91078a0
--- /dev/null
+++ b/src/mongo/s/chunk_version_test.cpp
@@ -0,0 +1,88 @@
+/**
+ * Copyright (C) 2012 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mongo/pch.h"
+
+#include "mongo/db/jsobj.h"
+#include "mongo/s/chunk_version.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+ /**
+ * Tests parsing of BSON for versions. In version 2.2, this parsing is meant to be very
+ * flexible so different formats can be tried and enforced later.
+ *
+ * Formats are:
+ *
+ * A) { vFieldName : <TSTYPE>, [ vFieldNameEpoch : <OID> ], ... }
+ * B) { fieldName : [ <TSTYPE>, <OID> ], ... }
+ *
+ * vFieldName is a specifiable name - usually "version" (default) or "lastmod". <TSTYPE> is a
+ * type convertible to Timestamp, ideally Timestamp but also numeric.
+ * <OID> is a value of type OID.
+ *
+ */
+
+ TEST(Compatibility, LegacyFormatA) {
+ BSONObjBuilder versionObjB;
+ versionObjB.appendTimestamp( "testVersion",
+ ChunkVersion( 1, 1, OID() ).toLong() );
+ versionObjB.append( "testVersionEpoch", OID::gen() );
+ BSONObj versionObj = versionObjB.obj();
+
+ ChunkVersion parsed =
+ ChunkVersion::fromBSON( versionObj[ "testVersion" ] );
+
+ ASSERT( ChunkVersion::canParseBSON( versionObj[ "testVersion" ] ) );
+ ASSERT( parsed.majorVersion() == 1 );
+ ASSERT( parsed.minorVersion() == 1 );
+ ASSERT( ! parsed.epoch().isSet() );
+
+ parsed = ChunkVersion::fromBSON( versionObj, "testVersion" );
+
+ ASSERT( ChunkVersion::canParseBSON( versionObj, "testVersion" ) );
+ ASSERT( parsed.majorVersion() == 1 );
+ ASSERT( parsed.minorVersion() == 1 );
+ ASSERT( parsed.epoch().isSet() );
+ }
+
+ TEST(Compatibility, SubArrayFormatB) {
+ BSONObjBuilder tsObjB;
+ tsObjB.appendTimestamp( "ts", ChunkVersion( 1, 1, OID() ).toLong() );
+ BSONObj tsObj = tsObjB.obj();
+
+ BSONObjBuilder versionObjB;
+ BSONArrayBuilder subArrB( versionObjB.subarrayStart( "testVersion" ) );
+ // Append this weird way so we're sure we get a timestamp type
+ subArrB.append( tsObj.firstElement() );
+ subArrB.append( OID::gen() );
+ subArrB.done();
+ BSONObj versionObj = versionObjB.obj();
+
+ ChunkVersion parsed =
+ ChunkVersion::fromBSON( versionObj[ "testVersion" ] );
+
+ ASSERT( ChunkVersion::canParseBSON( versionObj[ "testVersion" ] ) );
+ ASSERT( ChunkVersion::canParseBSON( BSONArray( versionObj[ "testVersion" ].Obj() ) ) );
+ ASSERT( parsed.majorVersion() == 1 );
+ ASSERT( parsed.minorVersion() == 1 );
+ ASSERT( parsed.epoch().isSet() );
+ }
+
+} // unnamed namespace
+} // namespace mongo
diff --git a/src/mongo/s/collection_manager.cpp b/src/mongo/s/collection_manager.cpp
index e7b82a5f4c8..0aa7e26cdc6 100644
--- a/src/mongo/s/collection_manager.cpp
+++ b/src/mongo/s/collection_manager.cpp
@@ -28,7 +28,7 @@ namespace mongo {
CollectionManager::~CollectionManager() { }
CollectionManager* CollectionManager::cloneMinus(const ChunkType& chunk,
- const ShardChunkVersion& newShardVersion,
+ const ChunkVersion& newShardVersion,
string* errMsg) const {
// The error message string is optional.
string dummy;
@@ -80,7 +80,7 @@ namespace mongo {
}
CollectionManager* CollectionManager::clonePlus(const ChunkType& chunk,
- const ShardChunkVersion& newShardVersion,
+ const ChunkVersion& newShardVersion,
string* errMsg) const {
// The error message string is optional.
string dummy;
@@ -132,7 +132,7 @@ namespace mongo {
CollectionManager* CollectionManager::cloneSplit(const ChunkType& chunk,
const vector<BSONObj>& splitKeys,
- const ShardChunkVersion& newShardVersion,
+ const ChunkVersion& newShardVersion,
string* errMsg) const {
// The error message string is optional.
string dummy;
diff --git a/src/mongo/s/collection_manager.h b/src/mongo/s/collection_manager.h
index e1158d99f5b..8182f9bb055 100644
--- a/src/mongo/s/collection_manager.h
+++ b/src/mongo/s/collection_manager.h
@@ -18,8 +18,8 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/type_chunk.h"
-#include "mongo/s/util.h" // for ShardChunkVersion
namespace mongo {
@@ -56,7 +56,7 @@ namespace mongo {
* provided.
*/
CollectionManager* cloneMinus(const ChunkType& chunk,
- const ShardChunkVersion& newShardVersion,
+ const ChunkVersion& newShardVersion,
string* errMsg) const;
/**
@@ -68,7 +68,7 @@ namespace mongo {
* provided.
*/
CollectionManager* clonePlus(const ChunkType& chunk,
- const ShardChunkVersion& newShardVersion,
+ const ChunkVersion& newShardVersion,
string* errMsg) const;
/**
@@ -82,7 +82,7 @@ namespace mongo {
*/
CollectionManager* cloneSplit(const ChunkType& chunk,
const vector<BSONObj>& splitKeys,
- const ShardChunkVersion& newShardVersion,
+ const ChunkVersion& newShardVersion,
string* errMsg) const;
//
@@ -109,9 +109,9 @@ namespace mongo {
// accessors
//
- ShardChunkVersion getMaxCollVersion() const { return _maxCollVersion; }
+ ChunkVersion getMaxCollVersion() const { return _maxCollVersion; }
- ShardChunkVersion getMaxShardVersion() const { return _maxShardVersion; }
+ ChunkVersion getMaxShardVersion() const { return _maxShardVersion; }
BSONObj getKey() const { return _key; }
@@ -126,14 +126,14 @@ namespace mongo {
// a version for this collection that identifies the collection incarnation (ie, a
// dropped and recreated collection with the same name would have a different version)
- ShardChunkVersion _maxCollVersion;
+ ChunkVersion _maxCollVersion;
//
// sharded state below, for when the colelction gets sharded
//
- // highest ShardChunkVersion for which this manager's information is accurate
- ShardChunkVersion _maxShardVersion;
+ // highest ChunkVersion for which this manager's information is accurate
+ ChunkVersion _maxShardVersion;
// key pattern for chunks under this range
BSONObj _key;
diff --git a/src/mongo/s/collection_manager_test.cpp b/src/mongo/s/collection_manager_test.cpp
index 33b4fd3c713..b4f1ca82a45 100644
--- a/src/mongo/s/collection_manager_test.cpp
+++ b/src/mongo/s/collection_manager_test.cpp
@@ -21,11 +21,11 @@
#include "mongo/db/jsobj.h"
#include "mongo/dbtests/mock/mock_conn_registry.h"
#include "mongo/dbtests/mock/mock_remote_db_server.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/collection_manager.h"
#include "mongo/s/metadata_loader.h"
#include "mongo/s/type_chunk.h"
#include "mongo/s/type_collection.h"
-#include "mongo/s/util.h" // for ShardChunkVersion
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
@@ -44,7 +44,7 @@ namespace {
using mongo::MetadataLoader;
using mongo::MINKEY;
using mongo::OID;
- using mongo::ShardChunkVersion;
+ using mongo::ChunkVersion;
using mongo::MockConnRegistry;
using mongo::MockRemoteDBServer;
using std::string;
@@ -101,7 +101,7 @@ namespace {
splitKeys.push_back(BSON("a" << 15));
// Setup version to use on splitting.
- //ShardChunkVersion nextVersion = manager->getMaxShardVersion();
+ //ChunkVersion nextVersion = manager->getMaxShardVersion();
//nextVersion.incMinor();
//string errMsg;
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 869d4cbd4cb..efeee642292 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -26,6 +26,7 @@
#include "mongo/db/cmdline.h"
#include "mongo/db/pdfile.h"
#include "mongo/s/chunk.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/server.h"
@@ -288,7 +289,7 @@ namespace mongo {
ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload, bool forceReload ) {
BSONObj key;
- ShardChunkVersion oldVersion;
+ ChunkVersion oldVersion;
ChunkManagerPtr oldManager;
{
@@ -328,7 +329,7 @@ namespace mongo {
conn->done();
if ( ! newest.isEmpty() ) {
- ShardChunkVersion v = ShardChunkVersion::fromBSON(newest, ChunkType::DEPRECATED_lastmod());
+ ChunkVersion v = ChunkVersion::fromBSON(newest, ChunkType::DEPRECATED_lastmod());
if ( v.isEquivalentTo( oldVersion ) ) {
scoped_lock lk( _lock );
CollectionInfo& ci = _collections[ns];
@@ -358,8 +359,8 @@ namespace mongo {
CollectionInfo& ci = _collections[ns];
if ( ci.isSharded() && ci.getCM() ) {
- ShardChunkVersion currentVersion =
- ShardChunkVersion::fromBSON(newest, ChunkType::DEPRECATED_lastmod());
+ ChunkVersion currentVersion =
+ ChunkVersion::fromBSON(newest, ChunkType::DEPRECATED_lastmod());
// Only reload if the version we found is newer than our own in the same
// epoch
diff --git a/src/mongo/s/d_chunk_manager.cpp b/src/mongo/s/d_chunk_manager.cpp
index abf267f19cb..2089ca267e8 100644
--- a/src/mongo/s/d_chunk_manager.cpp
+++ b/src/mongo/s/d_chunk_manager.cpp
@@ -103,9 +103,9 @@ namespace mongo {
_fillCollectionKey( collectionDoc );
- map<string,ShardChunkVersion> versionMap;
+ map<string,ChunkVersion> versionMap;
versionMap[ shardName ] = _version;
- _collVersion = ShardChunkVersion( 0, OID() );
+ _collVersion = ChunkVersion( 0, OID() );
// Check to see if we have an old ShardChunkManager to use
if( oldManager && oldManager->_collVersion.isSet() ){
@@ -147,8 +147,8 @@ namespace mongo {
// No chunks were found for the ns
warning() << "no chunks found when reloading " << ns << ", previous version was " << _collVersion << endl;
- _version = ShardChunkVersion( 0, OID() );
- _collVersion = ShardChunkVersion( 0, OID() );
+ _version = ChunkVersion( 0, OID() );
+ _collVersion = ChunkVersion( 0, OID() );
_chunksMap.clear();
}
else{
@@ -191,13 +191,13 @@ namespace mongo {
void ShardChunkManager::_fillChunks( DBClientCursorInterface* cursor ) {
verify( cursor );
- ShardChunkVersion version;
+ ChunkVersion version;
while ( cursor->more() ) {
BSONObj d = cursor->next();
_chunksMap.insert(make_pair(d[ChunkType::min()].Obj().getOwned(),
d[ChunkType::max()].Obj().getOwned()));
- ShardChunkVersion currVersion = ShardChunkVersion::fromBSON(d[ChunkType::DEPRECATED_lastmod()]);
+ ChunkVersion currVersion = ChunkVersion::fromBSON(d[ChunkType::DEPRECATED_lastmod()]);
if ( currVersion > version ) {
version = currVersion;
}
@@ -322,7 +322,7 @@ namespace mongo {
}
}
- ShardChunkManager* ShardChunkManager::cloneMinus( const BSONObj& min, const BSONObj& max, const ShardChunkVersion& version ) {
+ ShardChunkManager* ShardChunkManager::cloneMinus( const BSONObj& min, const BSONObj& max, const ChunkVersion& version ) {
// check that we have the exact chunk that will be subtracted
_assertChunkExists( min , max );
@@ -334,7 +334,7 @@ namespace mongo {
// if left with no chunks, just reset version
uassert( 13590 , str::stream() << "setting version to " << version.toString() << " on removing last chunk", ! version.isSet() );
- p->_version = ShardChunkVersion( 0, OID() );
+ p->_version = ChunkVersion( 0, OID() );
p->_collVersion = _collVersion;
}
@@ -360,7 +360,7 @@ namespace mongo {
return ! ( ( h1.woCompare( l2 ) <= 0 ) || ( h2.woCompare( l1 ) <= 0 ) );
}
- ShardChunkManager* ShardChunkManager::clonePlus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version ) {
+ ShardChunkManager* ShardChunkManager::clonePlus( const BSONObj& min , const BSONObj& max , const ChunkVersion& version ) {
// it is acceptable to move version backwards (e.g., undoing a migration that went bad during commit)
// but only cloning away the last chunk may reset the version to 0
@@ -396,7 +396,7 @@ namespace mongo {
}
ShardChunkManager* ShardChunkManager::cloneSplit( const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
- const ShardChunkVersion& version ) {
+ const ChunkVersion& version ) {
// the version required in both resulting chunks could be simply an increment in the minor portion of the current version
// however, we are enforcing uniqueness over the attributes <ns, lastmod> of the configdb collection 'chunks'
diff --git a/src/mongo/s/d_chunk_manager.h b/src/mongo/s/d_chunk_manager.h
index b93772ab43c..c30c67ddb5f 100644
--- a/src/mongo/s/d_chunk_manager.h
+++ b/src/mongo/s/d_chunk_manager.h
@@ -20,8 +20,8 @@
#include "mongo/pch.h"
-#include "../db/jsobj.h"
-#include "util.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/s/chunk_version.h"
namespace mongo {
@@ -77,7 +77,7 @@ namespace mongo {
* When cloning away the last chunk, version must be 0.
* @return a new ShardChunkManager, to be owned by the caller
*/
- ShardChunkManager* cloneMinus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version );
+ ShardChunkManager* cloneMinus( const BSONObj& min , const BSONObj& max , const ChunkVersion& version );
/**
* Generates a new manager based on 'this's state plus a given chunk.
@@ -86,7 +86,7 @@ namespace mongo {
* @param version that the resulting manager should be at. It can never be 0, though (see CloneMinus).
* @return a new ShardChunkManager, to be owned by the caller
*/
- ShardChunkManager* clonePlus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version );
+ ShardChunkManager* clonePlus( const BSONObj& min , const BSONObj& max , const ChunkVersion& version );
/**
* Generates a new manager by splitting an existing chunk at one or more points.
@@ -97,7 +97,7 @@ namespace mongo {
* @return a new ShardChunkManager with the chunk split, to be owned by the caller
*/
ShardChunkManager* cloneSplit( const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
- const ShardChunkVersion& version );
+ const ChunkVersion& version );
/**
* Checks whether a document belongs to this shard.
@@ -128,8 +128,8 @@ namespace mongo {
// accessors
- ShardChunkVersion getVersion() const { return _version; }
- ShardChunkVersion getCollVersion() const { return _collVersion; }
+ ChunkVersion getVersion() const { return _version; }
+ ChunkVersion getCollVersion() const { return _collVersion; }
BSONObj getKey() const { return _key.getOwned(); }
unsigned getNumChunks() const { return _chunksMap.size(); }
@@ -142,9 +142,9 @@ namespace mongo {
*/
bool _belongsToMe( const BSONObj& point ) const;
- ShardChunkVersion _collVersion;
- // highest ShardChunkVersion for which this ShardChunkManager's information is accurate
- ShardChunkVersion _version;
+ ChunkVersion _collVersion;
+ // highest ChunkVersion for which this ShardChunkManager's information is accurate
+ ChunkVersion _version;
// key pattern for chunks under this range
BSONObj _key;
diff --git a/src/mongo/s/d_logic.h b/src/mongo/s/d_logic.h
index 3e4fa2f5b91..f1958c6dc77 100644
--- a/src/mongo/s/d_logic.h
+++ b/src/mongo/s/d_logic.h
@@ -20,10 +20,9 @@
#include "mongo/pch.h"
-#include "../db/jsobj.h"
-
-#include "d_chunk_manager.h"
-#include "util.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/s/d_chunk_manager.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/util/concurrency/ticketholder.h"
namespace mongo {
@@ -31,7 +30,7 @@ namespace mongo {
class Database;
class DiskLoc;
- typedef ShardChunkVersion ConfigVersion;
+ typedef ChunkVersion ConfigVersion;
// --------------
// --- global state ---
@@ -107,7 +106,7 @@ namespace mongo {
* @param min max the chunk to eliminate from the current manager
* @param version at which the new manager should be at
*/
- void donateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version );
+ void donateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ChunkVersion version );
/**
* Creates and installs a new chunk manager for a given collection by reclaiming a previously donated chunk.
@@ -120,7 +119,7 @@ namespace mongo {
* @param min max the chunk to reclaim and add to the current manager
* @param version at which the new manager should be at
*/
- void undoDonateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version );
+ void undoDonateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ChunkVersion version );
/**
* Creates and installs a new chunk manager for a given collection by splitting one of its chunks in two or more.
@@ -136,7 +135,7 @@ namespace mongo {
* @param version at which the new manager should be at
*/
void splitChunk( const string& ns , const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
- ShardChunkVersion version );
+ ChunkVersion version );
bool inCriticalMigrateSection();
@@ -232,7 +231,7 @@ namespace mongo {
/**
* @return true if the current thread's shard version is ok, or not in sharded version
- * Also returns an error message and the Config/ShardChunkVersions causing conflicts
+ * Also returns an error message and the Config/ChunkVersions causing conflicts
*/
bool shardVersionOk( const string& ns , string& errmsg, ConfigVersion& received, ConfigVersion& wanted );
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index b257a49b485..74cbc87fe93 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -51,6 +51,7 @@
#include "mongo/db/repl.h"
#include "mongo/db/repl_block.h"
#include "mongo/s/chunk.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/config.h"
#include "mongo/s/d_logic.h"
#include "mongo/s/shard.h"
@@ -985,8 +986,8 @@ namespace mongo {
BSONObj chunkInfo = BSON("min" << min << "max" << max << "from" << fromShard.getName() << "to" << toShard.getName() );
configServer.logChange( "moveChunk.start" , ns , chunkInfo );
- ShardChunkVersion maxVersion;
- ShardChunkVersion startingVersion;
+ ChunkVersion maxVersion;
+ ChunkVersion startingVersion;
string myOldShard;
{
scoped_ptr<ScopedDbConnection> conn(
@@ -1009,7 +1010,7 @@ namespace mongo {
return false;
}
- maxVersion = ShardChunkVersion::fromBSON(x, ChunkType::DEPRECATED_lastmod());
+ maxVersion = ChunkVersion::fromBSON(x, ChunkType::DEPRECATED_lastmod());
verify(currChunk[ChunkType::shard()].type());
verify(currChunk[ChunkType::min()].type());
verify(currChunk[ChunkType::max()].type());
@@ -1182,7 +1183,7 @@ namespace mongo {
// 5.a
// we're under the collection lock here, so no other migrate can change maxVersion or ShardChunkManager state
migrateFromStatus.setInCriticalSection( true );
- ShardChunkVersion myVersion = maxVersion;
+ ChunkVersion myVersion = maxVersion;
myVersion.incMajor();
{
@@ -1247,7 +1248,7 @@ namespace mongo {
// version at which the next highest lastmod will be set
// if the chunk being moved is the last in the shard, nextVersion is that chunk's lastmod
// otherwise the highest version is from the chunk being bumped on the FROM-shard
- ShardChunkVersion nextVersion;
+ ChunkVersion nextVersion;
// we want to go only once to the configDB but perhaps change two chunks, the one being migrated and another
// local one (so to bump version for the entire shard)
@@ -1392,8 +1393,8 @@ namespace mongo {
Query(BSON(ChunkType::ns(ns)))
.sort(BSON(ChunkType::DEPRECATED_lastmod() << -1)));
- ShardChunkVersion checkVersion =
- ShardChunkVersion::fromBSON(doc[ChunkType::DEPRECATED_lastmod()]);
+ ChunkVersion checkVersion =
+ ChunkVersion::fromBSON(doc[ChunkType::DEPRECATED_lastmod()]);
if ( checkVersion.isEquivalentTo( nextVersion ) ) {
log() << "moveChunk commit confirmed" << migrateLog;
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 4053f371c13..5f680577988 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/queryoptimizer.h"
#include "mongo/s/chunk.h" // for static genID only
+#include "mongo/s/chunk_version.h"
#include "mongo/s/config.h"
#include "mongo/s/d_logic.h"
#include "mongo/s/type_chunk.h"
@@ -445,10 +446,10 @@ namespace mongo {
struct ChunkInfo {
BSONObj min;
BSONObj max;
- ShardChunkVersion lastmod;
+ ChunkVersion lastmod;
ChunkInfo() { }
- ChunkInfo( BSONObj aMin , BSONObj aMax , ShardChunkVersion aVersion ) : min(aMin) , max(aMax) , lastmod(aVersion) {}
+ ChunkInfo( BSONObj aMin , BSONObj aMax , ChunkVersion aVersion ) : min(aMin) , max(aMax) , lastmod(aVersion) {}
void appendShortVersion( const char* name, BSONObjBuilder& b ) const;
string toString() const;
};
@@ -581,7 +582,7 @@ namespace mongo {
// TODO This is a check migrate does to the letter. Factor it out and share. 2010-10-22
- ShardChunkVersion maxVersion;
+ ChunkVersion maxVersion;
string shard;
ChunkInfo origChunk;
{
@@ -593,7 +594,7 @@ namespace mongo {
Query(BSON(ChunkType::ns(ns)))
.sort(BSON(ChunkType::DEPRECATED_lastmod() << -1)));
- maxVersion = ShardChunkVersion::fromBSON(x, ChunkType::DEPRECATED_lastmod());
+ maxVersion = ChunkVersion::fromBSON(x, ChunkType::DEPRECATED_lastmod());
BSONObj currChunk =
conn->get()->findOne(ChunkType::ConfigNS,
@@ -641,11 +642,11 @@ namespace mongo {
origChunk.min = currMin.getOwned();
origChunk.max = currMax.getOwned();
- origChunk.lastmod = ShardChunkVersion::fromBSON(currChunk[ChunkType::DEPRECATED_lastmod()]);
+ origChunk.lastmod = ChunkVersion::fromBSON(currChunk[ChunkType::DEPRECATED_lastmod()]);
// since this could be the first call that enables sharding we also make sure to have the chunk manager up to date
shardingState.gotShardName( shard );
- ShardChunkVersion shardVersion;
+ ChunkVersion shardVersion;
shardingState.trySetVersion( ns , shardVersion /* will return updated */ );
log() << "splitChunk accepted at version " << shardVersion << endl;
@@ -661,7 +662,7 @@ namespace mongo {
LOG(1) << "before split on " << origChunk << endl;
vector<ChunkInfo> newChunks;
- ShardChunkVersion myVersion = maxVersion;
+ ChunkVersion myVersion = maxVersion;
BSONObj startKey = min;
splitKeys.push_back( max ); // makes it easier to have 'max' in the next loop. remove later.
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 427f7b046c3..2736dd5c90e 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -30,17 +30,16 @@
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
-#include "../db/commands.h"
-#include "../db/jsobj.h"
-#include "../db/db.h"
-#include "../db/replutil.h"
-#include "../client/connpool.h"
-
-#include "../util/queue.h"
-
-#include "shard.h"
-#include "d_logic.h"
-#include "config.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/db/db.h"
+#include "mongo/db/replutil.h"
+#include "mongo/client/connpool.h"
+#include "mongo/s/chunk_version.h"
+#include "mongo/s/config.h"
+#include "mongo/s/d_logic.h"
+#include "mongo/s/shard.h"
+#include "mongo/util/queue.h"
#include "mongo/util/concurrency/ticketholder.h"
using namespace std;
@@ -148,7 +147,7 @@ namespace mongo {
}
}
- void ShardingState::donateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version ) {
+ void ShardingState::donateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ChunkVersion version ) {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
@@ -156,7 +155,7 @@ namespace mongo {
ShardChunkManagerPtr p = it->second;
// empty shards should have version 0
- version = ( p->getNumChunks() > 1 ) ? version : ShardChunkVersion( 0 , OID() );
+ version = ( p->getNumChunks() > 1 ) ? version : ChunkVersion( 0 , OID() );
ShardChunkManagerPtr cloned( p->cloneMinus( min , max , version ) );
// TODO: a bit dangerous to have two different zero-version states - no-manager and
@@ -164,7 +163,7 @@ namespace mongo {
_chunks[ns] = cloned;
}
- void ShardingState::undoDonateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version ) {
+ void ShardingState::undoDonateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ChunkVersion version ) {
scoped_lock lk( _mutex );
log() << "ShardingState::undoDonateChunk acquired _mutex" << endl;
@@ -175,7 +174,7 @@ namespace mongo {
}
void ShardingState::splitChunk( const string& ns , const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
- ShardChunkVersion version ) {
+ ChunkVersion version ) {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
@@ -278,7 +277,7 @@ namespace mongo {
_chunks[ns] = p;
}
- ShardChunkVersion oldVersion = version;
+ ChunkVersion oldVersion = version;
version = p->getVersion();
return oldVersion.isEquivalentTo( version );
}
@@ -616,12 +615,12 @@ namespace mongo {
if ( oldVersion.isSet() && ! globalVersion.isSet() ) {
// this had been reset
- info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
+ info->setVersion( ns , ChunkVersion( 0, OID() ) );
}
if ( ! version.isSet() && ! globalVersion.isSet() ) {
// this connection is cleaning itself
- info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
+ info->setVersion( ns , ChunkVersion( 0, OID() ) );
return true;
}
@@ -638,7 +637,7 @@ namespace mongo {
// only setting global version on purpose
// need clients to re-find meta-data
shardingState.resetVersion( ns );
- info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
+ info->setVersion( ns , ChunkVersion( 0, OID() ) );
return true;
}
@@ -686,7 +685,7 @@ namespace mongo {
{
dbtemprelease unlock;
- ShardChunkVersion currVersion = version;
+ ChunkVersion currVersion = version;
if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
result.append( "ns" , ns );
diff --git a/src/mongo/s/default_version.cpp b/src/mongo/s/default_version.cpp
index cdbcd95bf1f..485eb4f51ac 100644
--- a/src/mongo/s/default_version.cpp
+++ b/src/mongo/s/default_version.cpp
@@ -17,7 +17,8 @@
*/
#include "pch.h"
-#include "s/util.h"
+
+#include "s/util.h" // for VersionManager
namespace mongo {
diff --git a/src/mongo/s/metadata_loader.cpp b/src/mongo/s/metadata_loader.cpp
index a3872007d38..2a43c71a053 100644
--- a/src/mongo/s/metadata_loader.cpp
+++ b/src/mongo/s/metadata_loader.cpp
@@ -20,6 +20,7 @@
#include "mongo/client/dbclientcursor.h"
#include "mongo/client/dbclientmockcursor.h"
#include "mongo/s/chunk_diff.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/collection_manager.h"
#include "mongo/s/type_chunk.h"
#include "mongo/s/type_collection.h"
@@ -89,8 +90,8 @@ namespace mongo {
CollectionManager* MetadataLoader::makeEmptyCollectionManager() {
CollectionManager* manager = new CollectionManager;
- manager->_maxCollVersion = ShardChunkVersion(1, 0, OID());
- manager->_maxShardVersion = ShardChunkVersion(1, 0, OID());
+ manager->_maxCollVersion = ChunkVersion(1, 0, OID());
+ manager->_maxShardVersion = ChunkVersion(1, 0, OID());
dassert(manager->isValid());
return manager;
}
@@ -153,7 +154,7 @@ namespace mongo {
}
manager->_key = BSONObj();
- manager->_maxShardVersion = ShardChunkVersion(1, 0, collDoc.getEpoch());
+ manager->_maxShardVersion = ChunkVersion(1, 0, collDoc.getEpoch());
manager->_maxCollVersion = manager->_maxShardVersion;
}
else {
@@ -173,13 +174,13 @@ namespace mongo {
CollectionManager* manager,
string* errMsg) {
- map<string,ShardChunkVersion> versionMap;
- manager->_maxCollVersion = ShardChunkVersion(0, 0, collDoc.getEpoch());
+ map<string,ChunkVersion> versionMap;
+ manager->_maxCollVersion = ChunkVersion(0, 0, collDoc.getEpoch());
// Check to see if we should use the old version or not.
if (oldManager) {
- ShardChunkVersion oldVersion = oldManager->getMaxShardVersion();
+ ChunkVersion oldVersion = oldManager->getMaxShardVersion();
if (oldVersion.isSet() && oldVersion.hasCompatibleEpoch(collDoc.getEpoch())) {
@@ -216,7 +217,7 @@ namespace mongo {
if (!cursor.get()) {
// 'errMsg' was filled by the getChunkCursor() call.
- manager->_maxCollVersion = ShardChunkVersion();
+ manager->_maxCollVersion = ChunkVersion();
manager->_chunksMap.clear();
connPtr->done();
return false;
@@ -243,7 +244,7 @@ namespace mongo {
warning() << *errMsg << endl;
- manager->_maxCollVersion = ShardChunkVersion();
+ manager->_maxCollVersion = ChunkVersion();
manager->_chunksMap.clear();
connPtr->done();
return false;
@@ -260,7 +261,7 @@ namespace mongo {
warning() << errMsg << endl;
- manager->_maxCollVersion = ShardChunkVersion();
+ manager->_maxCollVersion = ChunkVersion();
manager->_chunksMap.clear();
connPtr->done();
return false;
diff --git a/src/mongo/s/shard_version.cpp b/src/mongo/s/shard_version.cpp
index 1dcfa7ad391..7ce1bb6e4a3 100644
--- a/src/mongo/s/shard_version.cpp
+++ b/src/mongo/s/shard_version.cpp
@@ -18,12 +18,13 @@
#include "pch.h"
-#include "chunk.h"
-#include "config.h"
-#include "grid.h"
-#include "util.h"
-#include "shard.h"
-#include "writeback_listener.h"
+#include "mongo/s/chunk.h"
+#include "mongo/s/chunk_version.h"
+#include "mongo/s/config.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/shard.h"
+#include "mongo/s/util.h" // for SendStaleConfigException
+#include "mongo/s/writeback_listener.h"
namespace mongo {
@@ -198,7 +199,7 @@ namespace mongo {
<< conn_in->getServerAddress() << ")" );
throw SendStaleConfigException( ns, msg,
- refManager->getVersion( shard ), ShardChunkVersion( 0, OID() ));
+ refManager->getVersion( shard ), ChunkVersion( 0, OID() ));
}
// has the ChunkManager been reloaded since the last time we updated the connection-level version?
@@ -209,7 +210,7 @@ namespace mongo {
}
- ShardChunkVersion version = ShardChunkVersion( 0, OID() );
+ ChunkVersion version = ChunkVersion( 0, OID() );
if ( isSharded && manager ) {
version = manager->getVersion( Shard::make( conn->getServerAddress() ) );
}
diff --git a/src/mongo/s/strategy.cpp b/src/mongo/s/strategy.cpp
index 20e1e8da46e..3e60e60c02a 100644
--- a/src/mongo/s/strategy.cpp
+++ b/src/mongo/s/strategy.cpp
@@ -24,9 +24,11 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/commands.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/grid.h"
#include "mongo/s/request.h"
#include "mongo/s/server.h"
+#include "mongo/s/util.h" // for SendStaleConfigException
#include "mongo/s/writeback_listener.h"
#include "mongo/util/mongoutils/str.h"
@@ -46,7 +48,7 @@ namespace mongo {
else if ( conn.setVersion() ) {
conn.done();
// Version is zero b/c we don't yet have a way to get the local version conflict
- throw RecvStaleConfigException( r.getns() , "doWrite" , ShardChunkVersion( 0, OID() ), ShardChunkVersion( 0, OID() ), true );
+ throw RecvStaleConfigException( r.getns() , "doWrite" , ChunkVersion( 0, OID() ), ChunkVersion( 0, OID() ), true );
}
conn->say( r.m() );
conn.done();
@@ -77,7 +79,7 @@ namespace mongo {
if ( qr->resultFlags() & ResultFlag_ShardConfigStale ) {
dbcon.done();
// Version is zero b/c this is deprecated codepath
- throw RecvStaleConfigException( r.getns() , "Strategy::doQuery", ShardChunkVersion( 0, OID() ), ShardChunkVersion( 0, OID() ) );
+ throw RecvStaleConfigException( r.getns() , "Strategy::doQuery", ChunkVersion( 0, OID() ), ChunkVersion( 0, OID() ) );
}
}
diff --git a/src/mongo/s/strategy_shard.cpp b/src/mongo/s/strategy_shard.cpp
index 96b5aa344c0..26168783069 100644
--- a/src/mongo/s/strategy_shard.cpp
+++ b/src/mongo/s/strategy_shard.cpp
@@ -29,6 +29,7 @@
#include "mongo/db/namespacestring.h"
#include "mongo/s/client_info.h"
#include "mongo/s/chunk.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/cursors.h"
#include "mongo/s/grid.h"
#include "mongo/s/request.h"
@@ -582,8 +583,8 @@ namespace mongo {
<< group.shard
<< " at version "
<< (group.manager.get() ?
- group.manager->getVersion().toString() :
- ShardChunkVersion(0, OID()).toString())
+ group.manager->getVersion().toString() :
+ ChunkVersion(0, OID()).toString())
<< endl;
//
@@ -680,8 +681,8 @@ namespace mongo {
<< group.shard->toString()
<< " at version "
<< (group.manager.get() ?
- group.manager->getVersion().toString() :
- ShardChunkVersion(0, OID()).toString())
+ group.manager->getVersion().toString() :
+ ChunkVersion(0, OID()).toString())
<< causedBy(insertErr);
// If we're continuing-on-error and the insert error is superseded by
diff --git a/src/mongo/s/type_chunk.cpp b/src/mongo/s/type_chunk.cpp
index ce513a62a8e..a85b9ac396d 100644
--- a/src/mongo/s/type_chunk.cpp
+++ b/src/mongo/s/type_chunk.cpp
@@ -137,16 +137,16 @@ namespace mongo {
if (!FieldParser::extract(source, jumbo, false, &_jumbo, errMsg)) return false;
//
- // ShardChunkVersion backward compatibility logic contained in ShardChunkVersion
+ // ChunkVersion backward compatibility logic contained in ChunkVersion
//
- // ShardChunkVersion is currently encoded as { 'version': [<TS>,<OID>] }
+ // ChunkVersion is currently encoded as { 'version': [<TS>,<OID>] }
- if (ShardChunkVersion::canParseBSON(source, version())) {
- _version = ShardChunkVersion::fromBSON(source, version());
+ if (ChunkVersion::canParseBSON(source, version())) {
+ _version = ChunkVersion::fromBSON(source, version());
}
- else if (ShardChunkVersion::canParseBSON(source, DEPRECATED_lastmod())) {
- _version = ShardChunkVersion::fromBSON(source, DEPRECATED_lastmod());
+ else if (ChunkVersion::canParseBSON(source, DEPRECATED_lastmod())) {
+ _version = ChunkVersion::fromBSON(source, DEPRECATED_lastmod());
}
return true;
@@ -157,7 +157,7 @@ namespace mongo {
_ns.clear();
_min = BSONObj();
_max = BSONObj();
- _version = ShardChunkVersion();
+ _version = ChunkVersion();
_shard.clear();
_jumbo = false;
}
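The fallback above keeps older config documents readable: new-style chunk docs carry the version under 'version', while older ones only have 'lastmod' / 'lastmodEpoch'. A hypothetical round-trip, restricted to members shown in this patch (the field values are made up):

    // Build an old-style chunk fragment the way ChunkVersion::toBSON() does.
    BSONObjBuilder b;
    b.appendTimestamp( "lastmod", ChunkVersion( 2, 1, OID() ).toLong() );
    b.append( "lastmodEpoch", OID::gen() );
    BSONObj oldStyle = b.obj();

    ChunkVersion parsed;
    if ( ChunkVersion::canParseBSON( oldStyle, "lastmod" ) ) {
        parsed = ChunkVersion::fromBSON( oldStyle, "lastmod" );
    }
    verify( parsed.isSet() && parsed.isEpochSet() );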
diff --git a/src/mongo/s/type_chunk.h b/src/mongo/s/type_chunk.h
index b47e6d8dcf4..f07d656e22c 100644
--- a/src/mongo/s/type_chunk.h
+++ b/src/mongo/s/type_chunk.h
@@ -21,7 +21,8 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/string_data.h"
#include "mongo/db/jsobj.h"
-#include "mongo/s/util.h" // for ShardChunkVersion
+#include "mongo/s/chunk_version.h"
+
namespace mongo {
/**
@@ -148,11 +149,11 @@ namespace mongo {
return _max;
}
- void setVersion(const ShardChunkVersion& version) {
+ void setVersion(const ChunkVersion& version) {
_version = version;
}
- const ShardChunkVersion& getVersion() const {
+ const ChunkVersion& getVersion() const {
return _version;
}
@@ -174,13 +175,13 @@ namespace mongo {
private:
// Convention: (M)andatory, (O)ptional, (S)pecial rule.
- string _name; // (M) chunk's id
- string _ns; // (M) collection this chunk is in
- BSONObj _min; // (M) first key of the range, inclusive
- BSONObj _max; // (M) last key of the range, non-inclusive
- ShardChunkVersion _version; // (M) version of this chunk
- string _shard; // (M) shard this chunk lives in
- bool _jumbo; // (O) too big to move?
+ string _name; // (M) chunk's id
+ string _ns; // (M) collection this chunk is in
+ BSONObj _min; // (M) first key of the range, inclusive
+ BSONObj _max; // (M) last key of the range, non-inclusive
+ ChunkVersion _version; // (M) version of this chunk
+ string _shard; // (M) shard this chunk lives in
+ bool _jumbo; // (O) too big to move?
};
} // namespace mongo
diff --git a/src/mongo/s/type_chunk_test.cpp b/src/mongo/s/type_chunk_test.cpp
index 4fb3c9a4954..72d801c3959 100644
--- a/src/mongo/s/type_chunk_test.cpp
+++ b/src/mongo/s/type_chunk_test.cpp
@@ -18,8 +18,8 @@
#include "mongo/bson/oid.h"
#include "mongo/bson/util/misc.h" // for Date_t
+#include "mongo/s/chunk_version.h"
#include "mongo/s/type_chunk.h"
-#include "mongo/s/util.h" // for ShardChunkVersion
#include "mongo/unittest/unittest.h"
namespace {
@@ -30,7 +30,7 @@ namespace {
using mongo::ChunkType;
using mongo::Date_t;
using mongo::OID;
- using mongo::ShardChunkVersion;
+ using mongo::ChunkVersion;
TEST(Validity, MissingFields) {
ChunkType chunk;
@@ -150,7 +150,7 @@ namespace {
ASSERT_EQUALS(chunk.getNS(), "test.mycol");
ASSERT_EQUALS(chunk.getMin(), BSON("a" << 10));
ASSERT_EQUALS(chunk.getMax(), BSON("a" << 20));
- ShardChunkVersion fetchedVersion = chunk.getVersion();
+ ChunkVersion fetchedVersion = chunk.getVersion();
ASSERT_EQUALS(fetchedVersion._combined, 1ULL);
ASSERT_EQUALS(fetchedVersion._epoch, epoch);
ASSERT_EQUALS(chunk.getShard(), "shard0001");
@@ -174,7 +174,7 @@ namespace {
ASSERT_EQUALS(chunk.getNS(), "test.mycol");
ASSERT_EQUALS(chunk.getMin(), BSON("a" << 10));
ASSERT_EQUALS(chunk.getMax(), BSON("a" << 20));
- ShardChunkVersion fetchedVersion = chunk.getVersion();
+ ChunkVersion fetchedVersion = chunk.getVersion();
ASSERT_EQUALS(fetchedVersion._combined, 1ULL);
ASSERT_EQUALS(fetchedVersion._epoch, epoch);
ASSERT_EQUALS(chunk.getShard(), "shard0001");
diff --git a/src/mongo/s/util.h b/src/mongo/s/util.h
index bbb51b43d83..f2507213661 100644
--- a/src/mongo/s/util.h
+++ b/src/mongo/s/util.h
@@ -19,6 +19,7 @@
#include "mongo/pch.h"
#include "mongo/db/jsobj.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/util/mongoutils/str.h"
/**
@@ -27,308 +28,12 @@
namespace mongo {
- //
- // ShardChunkVersions consist of a major/minor version scoped to a version epoch
- //
- struct ShardChunkVersion {
- union {
- struct {
- int _minor;
- int _major;
- };
- unsigned long long _combined;
- };
- OID _epoch;
-
- ShardChunkVersion() : _minor(0), _major(0), _epoch(OID()) {}
-
- //
- // Constructors shouldn't have default parameters here, since it's vital we track from
- // here on the epochs of versions, even if not used.
- //
-
- ShardChunkVersion( int major, int minor, const OID& epoch )
- : _minor(minor),_major(major), _epoch(epoch) {
- }
-
- ShardChunkVersion( unsigned long long ll, const OID& epoch )
- : _combined( ll ), _epoch(epoch) {
- }
-
- void inc( bool major ) {
- if ( major )
- incMajor();
- else
- incMinor();
- }
-
- void incMajor() {
- _major++;
- _minor = 0;
- }
-
- void incMinor() {
- _minor++;
- }
-
- // Incrementing an epoch creates a new, randomly generated identifier
- void incEpoch() {
- _epoch = OID::gen();
- _major = 0;
- _minor = 0;
- }
-
- // Note: this shouldn't be used as a substitute for version except in specific cases -
- // epochs make versions more complex
- unsigned long long toLong() const {
- return _combined;
- }
-
- bool isSet() const {
- return _combined > 0;
- }
-
- bool isEpochSet() const {
- return _epoch.isSet();
- }
-
- string toString() const {
- stringstream ss;
- // Similar to month/day/year. For the most part when debugging, we care about major
- // so it's first
- ss << _major << "|" << _minor << "||" << _epoch;
- return ss.str();
- }
-
- int majorVersion() const { return _major; }
- int minorVersion() const { return _minor; }
- OID epoch() const { return _epoch; }
-
- //
- // Explicit comparison operators - versions with epochs have non-trivial comparisons.
- // > < operators do not check epoch cases. Generally if using == we need to handle
- // more complex cases.
- //
-
- bool operator>( const ShardChunkVersion& otherVersion ) const {
- return this->_combined > otherVersion._combined;
- }
-
- bool operator>=( const ShardChunkVersion& otherVersion ) const {
- return this->_combined >= otherVersion._combined;
- }
-
- bool operator<( const ShardChunkVersion& otherVersion ) const {
- return this->_combined < otherVersion._combined;
- }
-
- bool operator<=( const ShardChunkVersion& otherVersion ) const {
- return this->_combined < otherVersion._combined;
- }
-
- //
- // Equivalence comparison types.
- //
-
- // Can we write to this data and not have a problem?
- bool isWriteCompatibleWith( const ShardChunkVersion& otherVersion ) const {
- if( ! hasCompatibleEpoch( otherVersion ) ) return false;
- return otherVersion._major == _major;
- }
-
- // Is this the same version?
- bool isEquivalentTo( const ShardChunkVersion& otherVersion ) const {
- if( ! hasCompatibleEpoch( otherVersion ) ) return false;
- return otherVersion._combined == _combined;
- }
-
- // Is this in the same epoch?
- bool hasCompatibleEpoch( const ShardChunkVersion& otherVersion ) const {
- return hasCompatibleEpoch( otherVersion._epoch );
- }
-
- bool hasCompatibleEpoch( const OID& otherEpoch ) const {
- // TODO : Change logic from eras are not-unequal to eras are equal
- if( otherEpoch.isSet() && _epoch.isSet() && otherEpoch != _epoch ) return false;
- return true;
- }
-
- //
- // BSON input/output
- //
- // The idea here is to make the BSON input style very flexible right now, so we
- // can then tighten it up in the next version. We can accept either a BSONObject field
- // with version and epoch, or version and epoch in different fields (either is optional).
- // In this case, epoch always is stored in a field name of the version field name + "Epoch"
- //
-
- //
- // { version : <TS> } and { version : [<TS>,<OID>] } format
- //
-
- static bool canParseBSON( const BSONElement& el, const string& prefix="" ){
- bool canParse;
- fromBSON( el, prefix, &canParse );
- return canParse;
- }
-
- static ShardChunkVersion fromBSON( const BSONElement& el, const string& prefix="" ){
- bool canParse;
- return fromBSON( el, prefix, &canParse );
- }
-
- static ShardChunkVersion fromBSON( const BSONElement& el,
- const string& prefix,
- bool* canParse )
- {
- *canParse = true;
-
- int type = el.type();
-
- if( type == Array ){
- return fromBSON( BSONArray( el.Obj() ), canParse );
- }
-
- if( type == jstOID ){
- return ShardChunkVersion( 0, 0, el.OID() );
- }
-
- if( el.isNumber() ){
- return ShardChunkVersion( static_cast<unsigned long long>( el.numberLong() ), OID() );
- }
-
- if( type == Timestamp || type == Date ){
- return ShardChunkVersion( el._numberLong(), OID() );
- }
-
- // Note - we used to throw here, we can't anymore b/c debug builds will be unhappy
- warning() << "can't load version from element type (" << (int)(el.type()) << ") "
- << el << endl;
-
- *canParse = false;
-
- return ShardChunkVersion( 0, OID() );
- }
-
- //
- // { version : <TS>, versionEpoch : <OID> } object format
- //
-
- static bool canParseBSON( const BSONObj& obj, const string& prefix="" ){
- bool canParse;
- fromBSON( obj, prefix, &canParse );
- return canParse;
- }
-
- static ShardChunkVersion fromBSON( const BSONObj& obj, const string& prefix="" ){
- bool canParse;
- return fromBSON( obj, prefix, &canParse );
- }
-
- static ShardChunkVersion fromBSON( const BSONObj& obj,
- const string& prefixIn,
- bool* canParse )
- {
- *canParse = true;
-
- string prefix = prefixIn;
- // "version" doesn't have a "cluster constanst" because that field is never
- // written to the config.
- if( prefixIn == "" && ! obj[ "version" ].eoo() ){
- prefix = (string)"version";
- }
- // TODO: use ChunkType::DEPRECATED_lastmod()
- // NOTE: type_chunk.h includes this file
- else if( prefixIn == "" && ! obj["lastmod"].eoo() ){
- prefix = (string)"lastmod";
- }
-
- ShardChunkVersion version = fromBSON( obj[ prefix ], prefixIn, canParse );
-
- if( obj[ prefix + "Epoch" ].type() == jstOID ){
- version._epoch = obj[ prefix + "Epoch" ].OID();
- *canParse = true;
- }
-
- return version;
- }
-
- //
- // { version : [<TS>, <OID>] } format
- //
-
- static bool canParseBSON( const BSONArray& arr ){
- bool canParse;
- fromBSON( arr, &canParse );
- return canParse;
- }
-
- static ShardChunkVersion fromBSON( const BSONArray& arr ){
- bool canParse;
- return fromBSON( arr, &canParse );
- }
-
- static ShardChunkVersion fromBSON( const BSONArray& arr,
- bool* canParse )
- {
- *canParse = false;
-
- ShardChunkVersion version;
-
- BSONObjIterator it( arr );
- if( ! it.more() ) return version;
-
- version = fromBSON( it.next(), "", canParse );
- if( ! canParse ) return version;
-
- *canParse = true;
-
- if( ! it.more() ) return version;
- BSONElement next = it.next();
- if( next.type() != jstOID ) return version;
-
- version._epoch = next.OID();
-
- return version;
- }
-
- //
- // Currently our BSON output is to two different fields, to cleanly work with older
- // versions that know nothing about epochs.
- //
-
- BSONObj toBSON( const string& prefixIn="" ) const {
- BSONObjBuilder b;
-
- string prefix = prefixIn;
- if( prefix == "" ) prefix = "version";
-
- b.appendTimestamp( prefix, _combined );
- b.append( prefix + "Epoch", _epoch );
- return b.obj();
- }
-
- void addToBSON( BSONObjBuilder& b, const string& prefix="" ) const {
- b.appendElements( toBSON( prefix ) );
- }
-
- void addEpochToBSON( BSONObjBuilder& b, const string& prefix="" ) const {
- b.append( prefix + "Epoch", _epoch );
- }
-
- };
-
- inline ostream& operator<<( ostream &s , const ShardChunkVersion& v) {
- s << v.toString();
- return s;
- }
-
/**
* your config info for a given shard/chunk is out of date
*/
class StaleConfigException : public AssertionException {
public:
- StaleConfigException( const string& ns , const string& raw , int code, ShardChunkVersion received, ShardChunkVersion wanted, bool justConnection = false )
+ StaleConfigException( const string& ns , const string& raw , int code, ChunkVersion received, ChunkVersion wanted, bool justConnection = false )
: AssertionException(
mongoutils::str::stream() << raw << " ( ns : " << ns <<
", received : " << received.toString() <<
@@ -345,16 +50,16 @@ namespace mongo {
StaleConfigException( const string& raw , int code, const BSONObj& error, bool justConnection = false )
: AssertionException( mongoutils::str::stream()
<< raw << " ( ns : " << ( error["ns"].type() == String ? error["ns"].String() : string("<unknown>") )
- << ", received : " << ShardChunkVersion::fromBSON( error, "vReceived" ).toString()
- << ", wanted : " << ShardChunkVersion::fromBSON( error, "vWanted" ).toString()
+ << ", received : " << ChunkVersion::fromBSON( error, "vReceived" ).toString()
+ << ", wanted : " << ChunkVersion::fromBSON( error, "vWanted" ).toString()
<< ", " << ( code == SendStaleConfigCode ? "send" : "recv" ) << " )",
code ),
_justConnection(justConnection) ,
// For legacy reasons, we may not always get a namespace here
_ns( error["ns"].type() == String ? error["ns"].String() : "" ),
- _received( ShardChunkVersion::fromBSON( error, "vReceived" ) ),
- _wanted( ShardChunkVersion::fromBSON( error, "vWanted" ) )
+ _received( ChunkVersion::fromBSON( error, "vReceived" ) ),
+ _wanted( ChunkVersion::fromBSON( error, "vWanted" ) )
{}
// Needs message so when we trace all exceptions on construction we get a useful
@@ -391,8 +96,8 @@ namespace mongo {
return true;
}
- ShardChunkVersion getVersionReceived() const { return _received; }
- ShardChunkVersion getVersionWanted() const { return _wanted; }
+ ChunkVersion getVersionReceived() const { return _received; }
+ ChunkVersion getVersionWanted() const { return _wanted; }
StaleConfigException& operator=( const StaleConfigException& elem ) {
@@ -409,13 +114,13 @@ namespace mongo {
private:
bool _justConnection;
string _ns;
- ShardChunkVersion _received;
- ShardChunkVersion _wanted;
+ ChunkVersion _received;
+ ChunkVersion _wanted;
};
class SendStaleConfigException : public StaleConfigException {
public:
- SendStaleConfigException( const string& ns , const string& raw , ShardChunkVersion received, ShardChunkVersion wanted, bool justConnection = false )
+ SendStaleConfigException( const string& ns , const string& raw , ChunkVersion received, ChunkVersion wanted, bool justConnection = false )
: StaleConfigException( ns, raw, SendStaleConfigCode, received, wanted, justConnection ) {}
SendStaleConfigException( const string& raw , const BSONObj& error, bool justConnection = false )
: StaleConfigException( raw, SendStaleConfigCode, error, justConnection ) {}
@@ -423,7 +128,7 @@ namespace mongo {
class RecvStaleConfigException : public StaleConfigException {
public:
- RecvStaleConfigException( const string& ns , const string& raw , ShardChunkVersion received, ShardChunkVersion wanted, bool justConnection = false )
+ RecvStaleConfigException( const string& ns , const string& raw , ChunkVersion received, ChunkVersion wanted, bool justConnection = false )
: StaleConfigException( ns, raw, RecvStaleConfigCode, received, wanted, justConnection ) {}
RecvStaleConfigException( const string& raw , const BSONObj& error, bool justConnection = false )
: StaleConfigException( raw, RecvStaleConfigCode, error, justConnection ) {}
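The struct removed above moves essentially unchanged into mongo/s/chunk_version.h under the new name. A short, non-normative sketch of the semantics callers depend on, assuming the members carried over verbatim (all values below are invented):

    OID epoch = OID::gen();

    ChunkVersion a( 2, 0, epoch );
    ChunkVersion b( 2, 5, epoch );                // same major, later minor
    ChunkVersion c( 3, 0, OID::gen() );           // unrelated epoch

    log() << a << endl;                           // "2|0||<epoch>": major|minor||epoch

    verify( a.isWriteCompatibleWith( b ) );       // same epoch and same major
    verify( !a.hasCompatibleEpoch( c ) );         // both epochs set and different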
diff --git a/src/mongo/s/writeback_listener.cpp b/src/mongo/s/writeback_listener.cpp
index 4e4fcbae139..3cd78b59d1a 100644
--- a/src/mongo/s/writeback_listener.cpp
+++ b/src/mongo/s/writeback_listener.cpp
@@ -18,18 +18,17 @@
#include "pch.h"
-#include "../util/timer.h"
+#include "writeback_listener.h"
#include "mongo/db/auth/authorization_manager.h"
-#include "config.h"
-#include "grid.h"
-#include "request.h"
-#include "server.h"
-#include "shard.h"
-#include "util.h"
-#include "client_info.h"
-
-#include "writeback_listener.h"
+#include "mongo/s/chunk_version.h"
+#include "mongo/s/client_info.h"
+#include "mongo/s/config.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/request.h"
+#include "mongo/s/server.h"
+#include "mongo/s/shard.h"
+#include "mongo/util/timer.h"
namespace mongo {
@@ -134,7 +133,7 @@ namespace mongo {
void WriteBackListener::run() {
int secsToSleep = 0;
- scoped_ptr<ShardChunkVersion> lastNeededVersion;
+ scoped_ptr<ChunkVersion> lastNeededVersion;
int lastNeededCount = 0;
bool needsToReloadShardInfo = false;
@@ -195,7 +194,7 @@ namespace mongo {
massert( 10427 , "invalid writeback message" , msg.header()->valid() );
DBConfigPtr db = grid.getDBConfig( ns );
- ShardChunkVersion needVersion = ShardChunkVersion::fromBSON( data, "version" );
+ ChunkVersion needVersion = ChunkVersion::fromBSON( data, "version" );
//
// TODO: Refactor the sharded strategy to correctly handle all sharding state changes itself,
@@ -208,7 +207,7 @@ namespace mongo {
ShardPtr primary;
db->getChunkManagerOrPrimary( ns, manager, primary );
- ShardChunkVersion currVersion;
+ ChunkVersion currVersion;
if( manager ) currVersion = manager->getVersion();
LOG(1) << "connectionId: " << cid << " writebackId: " << wid << " needVersion : " << needVersion.toString()
@@ -242,7 +241,7 @@ namespace mongo {
// Set our lastNeededVersion for next time
//
- lastNeededVersion.reset( new ShardChunkVersion( needVersion ) );
+ lastNeededVersion.reset( new ChunkVersion( needVersion ) );
lastNeededCount++;
//