author    Shaun Verch <shaun.verch@10gen.com>  2012-11-29 18:16:08 -0800
committer Shaun Verch <shaun.verch@10gen.com>  2012-12-18 11:39:20 -0500
commit    566cf01bd870036b0dfd4b2babaf414b65de444c (patch)
tree      62183f841b816d4bd2a7b38fe9cd8b33c1ba8960
parent    95aec02bdeb20e4529c140464996b8b47d9dd7e8 (diff)
download  mongo-566cf01bd870036b0dfd4b2babaf414b65de444c.tar.gz
SERVER-939 Changed ChunkFields to ChunkType
-rw-r--r--  src/mongo/dbtests/d_chunk_manager_tests.cpp  145
-rw-r--r--  src/mongo/dbtests/sharding.cpp                84
-rw-r--r--  src/mongo/s/balance.cpp                        8
-rw-r--r--  src/mongo/s/balancer_policy.cpp                4
-rw-r--r--  src/mongo/s/balancer_policy.h                  6
-rw-r--r--  src/mongo/s/balancer_policy_tests.cpp         80
-rw-r--r--  src/mongo/s/chunk.cpp                         52
-rw-r--r--  src/mongo/s/chunk_diff.hpp                    32
-rw-r--r--  src/mongo/s/cluster_constants.cpp             11
-rw-r--r--  src/mongo/s/cluster_constants.h               21
-rw-r--r--  src/mongo/s/commands_admin.cpp                 6
-rw-r--r--  src/mongo/s/config.cpp                        28
-rw-r--r--  src/mongo/s/config_migrate.cpp                14
-rw-r--r--  src/mongo/s/d_chunk_manager.cpp               14
-rw-r--r--  src/mongo/s/d_migrate.cpp                     73
-rw-r--r--  src/mongo/s/d_split.cpp                       57
-rw-r--r--  src/mongo/s/type_chunk.h                      18
-rw-r--r--  src/mongo/s/util.h                             3
18 files changed, 314 insertions(+), 342 deletions(-)
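
The rename is mechanical across all 18 files: every field accessor that previously lived on ChunkFields moves to ChunkType, the chunks collection name moves from ConfigNS::chunk to ChunkType::ConfigNS, and the lastmod accessor becomes ChunkType::DEPRECATED_lastmod() ahead of the 2.2 -> 2.4 version-format transition. A minimal before/after sketch of a typical caller, assembled from the call patterns that appear in this patch (illustrative only, not a verbatim hunk):

    // Before: field names come from the monolithic cluster_constants.h
    BSONObj doc = conn->findOne(ConfigNS::chunk,
                                Query(BSON(ChunkFields::ns(ns)))
                                    .sort(ChunkFields::lastmod(), -1));
    BSONObj min = doc[ChunkFields::min()].Obj();

    // After: the same lookup through the per-collection type header type_chunk.h
    BSONObj doc = conn->findOne(ChunkType::ConfigNS,
                                Query(BSON(ChunkType::ns(ns)))
                                    .sort(ChunkType::DEPRECATED_lastmod(), -1));
    BSONObj min = doc[ChunkType::min()].Obj();
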
diff --git a/src/mongo/dbtests/d_chunk_manager_tests.cpp b/src/mongo/dbtests/d_chunk_manager_tests.cpp
index ab8bbb8f7f1..b90e5cdf210 100644
--- a/src/mongo/dbtests/d_chunk_manager_tests.cpp
+++ b/src/mongo/dbtests/d_chunk_manager_tests.cpp
@@ -22,6 +22,7 @@
#include "mongo/dbtests/dbtests.h"
#include "mongo/s/d_chunk_manager.h"
#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
namespace {
@@ -34,10 +35,10 @@ namespace {
CollectionFields::unique(false));
// single-chunk collection
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("test.foo-a_MinKey") <<
- ChunkFields::ns("test.foo") <<
- ChunkFields::min(BSON("a" << MINKEY)) <<
- ChunkFields::max(BSON("a" << MAXKEY))));
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("test.foo-a_MinKey") <<
+ ChunkType::ns("test.foo") <<
+ ChunkType::min(BSON("a" << MINKEY)) <<
+ ChunkType::max(BSON("a" << MAXKEY))));
ShardChunkManager s ( collection , chunks );
@@ -59,11 +60,11 @@ namespace {
CollectionFields::unique(false));
// single-chunk collection
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("test.foo-a_MinKeyb_MinKey") <<
- ChunkFields::ns("test.foo") <<
- ChunkFields::min(BSON("a" << MINKEY <<
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("test.foo-a_MinKeyb_MinKey") <<
+ ChunkType::ns("test.foo") <<
+ ChunkType::min(BSON("a" << MINKEY <<
"b" << MINKEY)) <<
- ChunkFields::max(BSON("a" << MAXKEY <<
+ ChunkType::max(BSON("a" << MAXKEY <<
"b" << MINKEY))));
ShardChunkManager s ( collection , chunks );
@@ -89,20 +90,20 @@ namespace {
// 3-chunk collection, 2 of them being contiguous
// [min->10) , [10->20) , <gap> , [30->max)
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("x.y-a_MinKey") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(BSON("a" << MINKEY)) <<
- ChunkFields::max(BSON("a" << 10))) <<
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("x.y-a_MinKey") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(BSON("a" << MINKEY)) <<
+ ChunkType::max(BSON("a" << 10))) <<
- BSON(ChunkFields::name("x.y-a_10") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(BSON("a" << 10)) <<
- ChunkFields::max(BSON("a" << 20))) <<
+ BSON(ChunkType::name("x.y-a_10") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(BSON("a" << 10)) <<
+ ChunkType::max(BSON("a" << 20))) <<
- BSON(ChunkFields::name("x.y-a_30") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(BSON("a" << 30)) <<
- ChunkFields::max(BSON("a" << MAXKEY))));
+ BSON(ChunkType::name("x.y-a_30") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(BSON("a" << 30)) <<
+ ChunkType::max(BSON("a" << MAXKEY))));
ShardChunkManager s ( collection , chunks );
@@ -146,10 +147,10 @@ namespace {
// [10->20]
BSONObj key_a10 = BSON( "a" << 10 );
BSONObj key_a20 = BSON( "a" << 20 );
- BSONArray chunks2 = BSON_ARRAY(BSON(ChunkFields::name("x.y-a_10") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(key_a10) <<
- ChunkFields::max(key_a20)));
+ BSONArray chunks2 = BSON_ARRAY(BSON(ChunkType::name("x.y-a_10") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(key_a10) <<
+ ChunkType::max(key_a20)));
ShardChunkManager s2( collection , chunks2 );
ASSERT( s2.getNextChunk( empty , &foundMin , &foundMax ) );
ASSERT( foundMin.woCompare( key_a10 ) == 0 );
@@ -160,18 +161,18 @@ namespace {
BSONObj key_a30 = BSON( "a" << 30 );
BSONObj key_min = BSON( "a" << MINKEY );
BSONObj key_max = BSON( "a" << MAXKEY );
- BSONArray chunks3 = BSON_ARRAY(BSON(ChunkFields::name("x.y-a_MinKey") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(key_min) <<
- ChunkFields::max(key_a10)) <<
- BSON(ChunkFields::name("x.y-a_10") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(key_a10) <<
- ChunkFields::max(key_a20)) <<
- BSON(ChunkFields::name("x.y-a_30") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(key_a30) <<
- ChunkFields::max(key_max)));
+ BSONArray chunks3 = BSON_ARRAY(BSON(ChunkType::name("x.y-a_MinKey") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(key_min) <<
+ ChunkType::max(key_a10)) <<
+ BSON(ChunkType::name("x.y-a_10") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(key_a10) <<
+ ChunkType::max(key_a20)) <<
+ BSON(ChunkType::name("x.y-a_30") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(key_a30) <<
+ ChunkType::max(key_max)));
ShardChunkManager s3( collection , chunks3 );
ASSERT( ! s3.getNextChunk( empty , &foundMin , &foundMax ) ); // not eof
ASSERT( foundMin.woCompare( key_min ) == 0 );
@@ -205,10 +206,10 @@ namespace {
// 1-chunk collection
// [10,0-20,0)
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("test.foo-a_MinKey") <<
- ChunkFields::ns("test.foo") <<
- ChunkFields::min(BSON("a" << 10 << "b" << 0)) <<
- ChunkFields::max(BSON("a" << 20 << "b" << 0))));
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("test.foo-a_MinKey") <<
+ ChunkType::ns("test.foo") <<
+ ChunkType::min(BSON("a" << 10 << "b" << 0)) <<
+ ChunkType::max(BSON("a" << 20 << "b" << 0))));
ShardChunkManager s ( collection , chunks );
@@ -238,10 +239,10 @@ namespace {
// 1-chunk collection
// [10,0-20,0)
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("test.foo-a_MinKey") <<
- ChunkFields::ns("test.foo") <<
- ChunkFields::min(BSON("a" << 10 << "b" << 0)) <<
- ChunkFields::max(BSON("a" << 20 << "b" << 0))));
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("test.foo-a_MinKey") <<
+ ChunkType::ns("test.foo") <<
+ ChunkType::min(BSON("a" << 10 << "b" << 0)) <<
+ ChunkType::max(BSON("a" << 20 << "b" << 0))));
ShardChunkManager s ( collection , chunks );
@@ -262,14 +263,14 @@ namespace {
// 2-chunk collection
// [10,0->20,0) , <gap> , [30,0->40,0)
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("x.y-a_10b_0") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(BSON("a" << 10 << "b" << 0)) <<
- ChunkFields::max(BSON("a" << 20 << "b" << 0))) <<
- BSON(ChunkFields::name("x.y-a_30b_0") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(BSON("a" << 30 << "b" << 0)) <<
- ChunkFields::max(BSON("a" << 40 << "b" << 0))));
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("x.y-a_10b_0") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(BSON("a" << 10 << "b" << 0)) <<
+ ChunkType::max(BSON("a" << 20 << "b" << 0))) <<
+ BSON(ChunkType::name("x.y-a_30b_0") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(BSON("a" << 30 << "b" << 0)) <<
+ ChunkType::max(BSON("a" << 40 << "b" << 0))));
ShardChunkManager s ( collection , chunks );
@@ -301,14 +302,14 @@ namespace {
// 2-chunk collection
// [10,0->20,0) , <gap> , [30,0->40,0)
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("x.y-a_10b_0") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(BSON("a" << 10 << "b" << 0)) <<
- ChunkFields::max(BSON("a" << 20 << "b" << 0))) <<
- BSON(ChunkFields::name("x.y-a_30b_0") <<
- ChunkFields::ns("x.y") <<
- ChunkFields::min(BSON("a" << 30 << "b" << 0)) <<
- ChunkFields::max(BSON("a" << 40 << "b" << 0))));
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("x.y-a_10b_0") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(BSON("a" << 10 << "b" << 0)) <<
+ ChunkType::max(BSON("a" << 20 << "b" << 0))) <<
+ BSON(ChunkType::name("x.y-a_30b_0") <<
+ ChunkType::ns("x.y") <<
+ ChunkType::min(BSON("a" << 30 << "b" << 0)) <<
+ ChunkType::max(BSON("a" << 40 << "b" << 0))));
ShardChunkManager s ( collection , chunks );
@@ -337,10 +338,10 @@ namespace {
// [10,0-20,0)
BSONObj min = BSON( "a" << 10 << "b" << 0 );
BSONObj max = BSON( "a" << 20 << "b" << 0 );
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("test.foo-a_MinKey") <<
- ChunkFields::ns("test.foo") <<
- ChunkFields::min(min) <<
- ChunkFields::max(max)));
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("test.foo-a_MinKey") <<
+ ChunkType::ns("test.foo") <<
+ ChunkType::min(min) <<
+ ChunkType::max(max)));
ShardChunkManager s ( collection , chunks );
@@ -376,10 +377,10 @@ namespace {
// [10,0-20,0)
BSONObj min = BSON( "a" << 10 << "b" << 0 );
BSONObj max = BSON( "a" << 20 << "b" << 0 );
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("test.foo-a_MinKey") <<
- ChunkFields::ns("test.foo") <<
- ChunkFields::min(min) <<
- ChunkFields::max(max)));
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("test.foo-a_MinKey") <<
+ ChunkType::ns("test.foo") <<
+ ChunkType::min(min) <<
+ ChunkType::max(max)));
ShardChunkManager s ( collection , chunks );
@@ -425,10 +426,10 @@ namespace {
// 1-chunk collection
// [10->20)
- BSONArray chunks = BSON_ARRAY(BSON(ChunkFields::name("test.foo-a_10") <<
- ChunkFields::ns("test.foo") <<
- ChunkFields::min(BSON("a" << 10)) <<
- ChunkFields::max(BSON("a" << 20))));
+ BSONArray chunks = BSON_ARRAY(BSON(ChunkType::name("test.foo-a_10") <<
+ ChunkType::ns("test.foo") <<
+ ChunkType::min(BSON("a" << 10)) <<
+ ChunkType::max(BSON("a" << 20))));
ShardChunkManager s( collection , chunks );
BSONObj min = BSON( "a" << 10 );
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
index 14055aa8c84..365f481b0b1 100644
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
@@ -23,7 +23,7 @@
#include "mongo/client/dbclientmockcursor.h"
#include "mongo/client/parallel.h"
#include "mongo/s/chunk_diff.h"
-#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
namespace ShardingTests {
@@ -203,13 +203,13 @@ namespace ShardingTests {
ChunkManager manager( collName(), ShardKeyPattern( BSON( "_id" << 1 ) ), false );
manager.createFirstChunks( shard().getConnString(), shard(), NULL, NULL );
- BSONObj firstChunk = client().findOne(ConfigNS::chunk, BSONObj()).getOwned();
+ BSONObj firstChunk = client().findOne(ChunkType::ConfigNS, BSONObj()).getOwned();
- ASSERT(firstChunk[ChunkFields::min()].Obj()[ "_id" ].type() == MinKey );
- ASSERT(firstChunk[ChunkFields::max()].Obj()[ "_id" ].type() == MaxKey );
+ ASSERT(firstChunk[ChunkType::min()].Obj()[ "_id" ].type() == MinKey );
+ ASSERT(firstChunk[ChunkType::max()].Obj()[ "_id" ].type() == MaxKey );
ShardChunkVersion version = ShardChunkVersion::fromBSON(firstChunk,
- ChunkFields::lastmod());
+ ChunkType::DEPRECATED_lastmod());
ASSERT( version.majorVersion() == 1 );
ASSERT( version.minorVersion() == 0 );
@@ -261,7 +261,7 @@ namespace ShardingTests {
createChunks( keyName );
auto_ptr<DBClientCursor> cursor =
- client().query(ConfigNS::chunk, QUERY(ChunkFields::ns(collName())));
+ client().query(ChunkType::ConfigNS, QUERY(ChunkType::ns(collName())));
set<int> minorVersions;
OID epoch;
@@ -272,7 +272,7 @@ namespace ShardingTests {
BSONObj chunk = cursor->next();
ShardChunkVersion version = ShardChunkVersion::fromBSON(chunk,
- ChunkFields::lastmod());
+ ChunkType::DEPRECATED_lastmod());
ASSERT( version.majorVersion() == 1 );
ASSERT( version.isEpochSet() );
@@ -283,7 +283,7 @@ namespace ShardingTests {
ASSERT( minorVersions.find( version.minorVersion() ) == minorVersions.end() );
minorVersions.insert( version.minorVersion() );
- ASSERT(chunk[ChunkFields::shard()].String() == shard().getName());
+ ASSERT(chunk[ChunkType::shard()].String() == shard().getName());
}
}
@@ -300,13 +300,13 @@ namespace ShardingTests {
string keyName = "_id";
createChunks( keyName );
- int numChunks = static_cast<int>(client().count(ConfigNS::chunk,
- BSON(ChunkFields::ns(collName()))));
+ int numChunks = static_cast<int>(client().count(ChunkType::ConfigNS,
+ BSON(ChunkType::ns(collName()))));
- BSONObj firstChunk = client().findOne(ConfigNS::chunk, BSONObj()).getOwned();
+ BSONObj firstChunk = client().findOne(ChunkType::ConfigNS, BSONObj()).getOwned();
ShardChunkVersion version = ShardChunkVersion::fromBSON(firstChunk,
- ChunkFields::lastmod());
+ ChunkType::DEPRECATED_lastmod());
// Make manager load existing chunks
ChunkManagerPtr manager( new ChunkManager( collName(), ShardKeyPattern( BSON( "_id" << 1 ) ), false ) );
@@ -319,9 +319,9 @@ namespace ShardingTests {
// Modify chunks collection
BSONObjBuilder b;
ShardChunkVersion laterVersion = ShardChunkVersion( 2, 1, version.epoch() );
- laterVersion.addToBSON(b, ChunkFields::lastmod());
+ laterVersion.addToBSON(b, ChunkType::DEPRECATED_lastmod());
- client().update(ConfigNS::chunk, BSONObj(), BSON( "$set" << b.obj()));
+ client().update(ChunkType::ConfigNS, BSONObj(), BSON( "$set" << b.obj()));
// Make new manager load chunk diff
ChunkManager newManager( manager );
@@ -417,13 +417,13 @@ namespace ShardingTests {
}
ShardChunkVersion version =
- ShardChunkVersion::fromBSON(chunkDoc[ChunkFields::lastmod()]);
+ ShardChunkVersion::fromBSON(chunkDoc[ChunkType::DEPRECATED_lastmod()]);
if( version > foundMaxVersion ) foundMaxVersion = version;
ShardChunkVersion shardMaxVersion =
- foundMaxShardVersions[chunkDoc[ChunkFields::shard()].String()];
+ foundMaxShardVersions[chunkDoc[ChunkType::shard()].String()];
if( version > shardMaxVersion ) {
- foundMaxShardVersions[chunkDoc[ChunkFields::shard()].String() ] = version;
+ foundMaxShardVersions[chunkDoc[ChunkType::shard()].String() ] = version;
}
}
@@ -481,15 +481,15 @@ namespace ShardingTests {
if( i >= 0 ){
BSONObjBuilder chunkB;
- chunkB.append(ChunkFields::min(), lastSplitPt );
- chunkB.append(ChunkFields::max(), splitPt );
+ chunkB.append(ChunkType::min(), lastSplitPt );
+ chunkB.append(ChunkType::max(), splitPt );
int shardNum = rand( numShards );
- chunkB.append(ChunkFields::shard(),
+ chunkB.append(ChunkType::shard(),
"shard" + string( 1, (char)('A' + shardNum) ) );
rand( 2 ) ? version.incMajor() : version.incMinor();
- version.addToBSON(chunkB, ChunkFields::lastmod());
+ version.addToBSON(chunkB, ChunkType::DEPRECATED_lastmod());
chunksB.append( chunkB.obj() );
}
@@ -547,11 +547,11 @@ namespace ShardingTests {
for( int k = 0; k < keySize; k++ ){
string field = string( "k" ) + string( 1, (char)('0' + k) );
- BSONType maxType = chunk[ChunkFields::max()].Obj()[field].type();
+ BSONType maxType = chunk[ChunkType::max()].Obj()[field].type();
double max = maxType == NumberDouble ? chunk["max"].Obj()[field].Number() : 0.0;
- BSONType minType = chunk[ChunkFields::min()].Obj()[field].type();
+ BSONType minType = chunk[ChunkType::min()].Obj()[field].type();
double min = minType == NumberDouble ?
- chunk[ChunkFields::min()].Obj()[field].Number() :
+ chunk[ChunkType::min()].Obj()[field].Number() :
0.0;
if( minType == MinKey ){
@@ -569,19 +569,19 @@ namespace ShardingTests {
// Only happens if we can't split the min chunk
if( midPt.isEmpty() ) continue;
- leftB.append( chunk[ChunkFields::min()] );
- leftB.append(ChunkFields::max(), midPt );
- rightB.append(ChunkFields::min(), midPt );
- rightB.append(chunk[ChunkFields::max()] );
+ leftB.append( chunk[ChunkType::min()] );
+ leftB.append(ChunkType::max(), midPt );
+ rightB.append(ChunkType::min(), midPt );
+ rightB.append(chunk[ChunkType::max()] );
- leftB.append(chunk[ChunkFields::shard()] );
- rightB.append(chunk[ChunkFields::shard()] );
+ leftB.append(chunk[ChunkType::shard()] );
+ rightB.append(chunk[ChunkType::shard()] );
version.incMajor();
version._minor = 0;
- version.addToBSON(leftB, ChunkFields::lastmod());
+ version.addToBSON(leftB, ChunkType::DEPRECATED_lastmod());
version.incMinor();
- version.addToBSON(rightB, ChunkFields::lastmod());
+ version.addToBSON(rightB, ChunkType::DEPRECATED_lastmod());
BSONObj left = leftB.obj();
BSONObj right = rightB.obj();
@@ -604,8 +604,8 @@ namespace ShardingTests {
BSONObj prevShardChunk;
while( chunksIt.more() ){
prevShardChunk = chunksIt.next().Obj();
- if( prevShardChunk[ChunkFields::shard()].String() ==
- chunk[ChunkFields::shard()].String() ) break;
+ if( prevShardChunk[ChunkType::shard()].String() ==
+ chunk[ChunkType::shard()].String() ) break;
// log() << "... appending chunk from diff shard: " << prevShardChunk << endl;
newChunksB.append( prevShardChunk );
@@ -619,21 +619,21 @@ namespace ShardingTests {
BSONObjBuilder newShardB;
BSONObjBuilder prevShardB;
- newShardB.append(chunk[ChunkFields::min()]);
- newShardB.append(chunk[ChunkFields::max()]);
- prevShardB.append(prevShardChunk[ChunkFields::min()]);
- prevShardB.append(prevShardChunk[ChunkFields::max()]);
+ newShardB.append(chunk[ChunkType::min()]);
+ newShardB.append(chunk[ChunkType::max()]);
+ prevShardB.append(prevShardChunk[ChunkType::min()]);
+ prevShardB.append(prevShardChunk[ChunkType::max()]);
int shardNum = rand( numShards );
- newShardB.append(ChunkFields::shard(),
+ newShardB.append(ChunkType::shard(),
"shard" + string( 1, (char)('A' + shardNum)));
- prevShardB.append(prevShardChunk[ChunkFields::shard()]);
+ prevShardB.append(prevShardChunk[ChunkType::shard()]);
version.incMajor();
version._minor = 0;
- version.addToBSON(newShardB, ChunkFields::lastmod());
+ version.addToBSON(newShardB, ChunkType::DEPRECATED_lastmod());
version.incMinor();
- version.addToBSON(prevShardB, ChunkFields::lastmod());
+ version.addToBSON(prevShardB, ChunkType::DEPRECATED_lastmod());
BSONObj newShard = newShardB.obj();
BSONObj prevShard = prevShardB.obj();
diff --git a/src/mongo/s/balance.cpp b/src/mongo/s/balance.cpp
index 92c38f0312c..42903dc58aa 100644
--- a/src/mongo/s/balance.cpp
+++ b/src/mongo/s/balance.cpp
@@ -23,7 +23,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/cmdline.h"
#include "mongo/s/chunk.h"
-#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/server.h"
@@ -213,12 +213,12 @@ namespace mongo {
const string& ns = *it;
map< string,vector<BSONObj> > shardToChunksMap;
- cursor = conn.query(ConfigNS::chunk,
- QUERY(ChunkFields::ns(ns)).sort(ChunkFields::min()));
+ cursor = conn.query(ChunkType::ConfigNS,
+ QUERY(ChunkType::ns(ns)).sort(ChunkType::min()));
while ( cursor->more() ) {
BSONObj chunk = cursor->nextSafe();
- vector<BSONObj>& chunks = shardToChunksMap[chunk[ChunkFields::shard()].String()];
+ vector<BSONObj>& chunks = shardToChunksMap[chunk[ChunkType::shard()].String()];
chunks.push_back( chunk.getOwned() );
}
cursor.reset();
diff --git a/src/mongo/s/balancer_policy.cpp b/src/mongo/s/balancer_policy.cpp
index 74e53c2e0e1..3edf832da58 100644
--- a/src/mongo/s/balancer_policy.cpp
+++ b/src/mongo/s/balancer_policy.cpp
@@ -169,7 +169,7 @@ namespace mongo {
if ( _tagRanges.size() == 0 )
return "";
- BSONObj min = chunk[ChunkFields::min()].Obj();
+ BSONObj min = chunk[ChunkType::min()].Obj();
map<BSONObj,TagRange>::const_iterator i = _tagRanges.upper_bound( min );
if ( i == _tagRanges.end() )
@@ -205,7 +205,7 @@ namespace mongo {
}
bool BalancerPolicy::_isJumbo( const BSONObj& chunk ) {
- if ( chunk[ChunkFields::jumbo()].trueValue() ) {
+ if ( chunk[ChunkType::jumbo()].trueValue() ) {
LOG(1) << "chunk: " << chunk << "is marked as jumbo" << endl;
return true;
}
diff --git a/src/mongo/s/balancer_policy.h b/src/mongo/s/balancer_policy.h
index fc22889cbfb..467de2256b4 100644
--- a/src/mongo/s/balancer_policy.h
+++ b/src/mongo/s/balancer_policy.h
@@ -20,7 +20,7 @@
#define S_BALANCER_POLICY_HEADER
#include "mongo/db/jsobj.h"
-#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
namespace mongo {
@@ -33,8 +33,8 @@ namespace mongo {
: min( a_min.getOwned() ), max( a_max.getOwned() ){}
ChunkInfo( const BSONObj& chunk )
- : min(chunk[ChunkFields::min()].Obj().getOwned()),
- max(chunk[ChunkFields::max()].Obj().getOwned()) {
+ : min(chunk[ChunkType::min()].Obj().getOwned()),
+ max(chunk[ChunkType::max()].Obj().getOwned()) {
}
string toString() const;
diff --git a/src/mongo/s/balancer_policy_tests.cpp b/src/mongo/s/balancer_policy_tests.cpp
index df1d22d9b64..9f90c7f0875 100644
--- a/src/mongo/s/balancer_policy_tests.cpp
+++ b/src/mongo/s/balancer_policy_tests.cpp
@@ -15,7 +15,7 @@
#include "mongo/platform/random.h"
#include "mongo/s/balancer_policy.h"
-#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
#include "mongo/s/config.h"
#include "mongo/unittest/unittest.h"
@@ -46,10 +46,10 @@ namespace mongo {
// 2 chunks and 0 chunk shards
ShardToChunksMap chunkMap;
vector<BSONObj> chunks;
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << BSON("$minKey"<<1))) <<
- ChunkFields::max(BSON("x" << 49))));
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << 49)) <<
- ChunkFields::max(BSON("x" << BSON("$maxkey"<<1)))));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << BSON("$minKey"<<1))) <<
+ ChunkType::max(BSON("x" << 49))));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << 49)) <<
+ ChunkType::max(BSON("x" << BSON("$maxkey"<<1)))));
chunkMap["shard0"] = chunks;
chunks.clear();
chunkMap["shard1"] = chunks;
@@ -70,20 +70,20 @@ namespace mongo {
// 2 chunks and 0 chunk shards
ShardToChunksMap chunkMap;
vector<BSONObj> chunks;
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << BSON("$minKey"<<1))) <<
- ChunkFields::max(BSON("x" << 10)) <<
- ChunkFields::jumbo(true)));
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << 10)) <<
- ChunkFields::max(BSON("x" << 20)) <<
- ChunkFields::jumbo(true)));
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << 20)) <<
- ChunkFields::max(BSON("x" << 30))));
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << 30)) <<
- ChunkFields::max(BSON("x" << 40)) <<
- ChunkFields::jumbo(true)));
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << 40)) <<
- ChunkFields::max(BSON("x" << BSON("$maxkey"<<1))) <<
- ChunkFields::jumbo(true)));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << BSON("$minKey"<<1))) <<
+ ChunkType::max(BSON("x" << 10)) <<
+ ChunkType::jumbo(true)));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << 10)) <<
+ ChunkType::max(BSON("x" << 20)) <<
+ ChunkType::jumbo(true)));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << 20)) <<
+ ChunkType::max(BSON("x" << 30))));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << 30)) <<
+ ChunkType::max(BSON("x" << 40)) <<
+ ChunkType::jumbo(true)));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << 40)) <<
+ ChunkType::max(BSON("x" << BSON("$maxkey"<<1))) <<
+ ChunkType::jumbo(true)));
chunkMap["shard0"] = chunks;
chunks.clear();
chunkMap["shard1"] = chunks;
@@ -106,12 +106,12 @@ namespace mongo {
// 2 chunks and 0 chunk shards
ShardToChunksMap chunkMap;
vector<BSONObj> chunks;
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << BSON("$minKey"<<1))) <<
- ChunkFields::max(BSON("x" << 49))));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << BSON("$minKey"<<1))) <<
+ ChunkType::max(BSON("x" << 49))));
chunkMap["shard0"] = chunks;
chunks.clear();
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << 49))<<
- ChunkFields::max(BSON("x" << BSON("$maxkey"<<1)))));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << 49))<<
+ ChunkType::max(BSON("x" << BSON("$maxkey"<<1)))));
chunkMap["shard1"] = chunks;
// shard0 is draining
@@ -131,10 +131,10 @@ namespace mongo {
// 2 chunks and 0 chunk (drain completed) shards
ShardToChunksMap chunkMap;
vector<BSONObj> chunks;
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << BSON("$minKey"<<1))) <<
- ChunkFields::max(BSON("x" << 49))));
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << 49))<<
- ChunkFields::max(BSON("x" << BSON("$maxkey"<<1)))));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << BSON("$minKey"<<1))) <<
+ ChunkType::max(BSON("x" << 49))));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << 49))<<
+ ChunkType::max(BSON("x" << BSON("$maxkey"<<1)))));
chunkMap["shard0"] = chunks;
chunks.clear();
chunkMap["shard1"] = chunks;
@@ -154,12 +154,12 @@ namespace mongo {
// 2 chunks and 0 chunk shards
ShardToChunksMap chunkMap;
vector<BSONObj> chunks;
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << BSON("$minKey"<<1))) <<
- ChunkFields::max(BSON("x" << 49))));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << BSON("$minKey"<<1))) <<
+ ChunkType::max(BSON("x" << 49))));
chunkMap["shard0"] = chunks;
chunks.clear();
- chunks.push_back(BSON(ChunkFields::min(BSON("x" << 49)) <<
- ChunkFields::max(BSON("x" << BSON("$maxkey"<<1)))));
+ chunks.push_back(BSON(ChunkType::min(BSON("x" << 49)) <<
+ ChunkType::max(BSON("x" << BSON("$maxkey"<<1)))));
chunkMap["shard1"] = chunks;
// shard0 is draining, shard1 is maxed out, shard2 has writebacks pending
@@ -197,7 +197,7 @@ namespace mongo {
else
max = BSON( "x" << 1 + total + i );
- chunks.push_back( BSON(ChunkFields::min(min) << ChunkFields::max(max)));
+ chunks.push_back( BSON(ChunkType::min(min) << ChunkType::max(max)));
}
}
@@ -205,7 +205,7 @@ namespace mongo {
void moveChunk( ShardToChunksMap& map, MigrateInfo* m ) {
vector<BSONObj>& chunks = map[m->from];
for ( vector<BSONObj>::iterator i = chunks.begin(); i != chunks.end(); ++i ) {
- if (i->getField(ChunkFields::min()).Obj() == m->chunk.min) {
+ if (i->getField(ChunkType::min()).Obj() == m->chunk.min) {
map[m->to].push_back( *i );
chunks.erase( i );
return;
@@ -317,13 +317,13 @@ namespace mongo {
ASSERT( ! d.addTagRange( TagRange( BSON( "x" << 22 ), BSON( "x" << 28 ) , "c" ) ) );
ASSERT( ! d.addTagRange( TagRange( BSON( "x" << 28 ), BSON( "x" << 33 ) , "c" ) ) );
- ASSERT_EQUALS("", d.getTagForChunk(BSON(ChunkFields::min(BSON("x" << -4)))));
- ASSERT_EQUALS("", d.getTagForChunk(BSON(ChunkFields::min(BSON("x" << 0)))));
- ASSERT_EQUALS("a", d.getTagForChunk(BSON(ChunkFields::min(BSON("x" << 1)))));
- ASSERT_EQUALS("b", d.getTagForChunk(BSON(ChunkFields::min(BSON("x" << 10)))));
- ASSERT_EQUALS("b", d.getTagForChunk(BSON(ChunkFields::min(BSON("x" << 15)))));
- ASSERT_EQUALS("c", d.getTagForChunk(BSON(ChunkFields::min(BSON("x" << 25)))));
- ASSERT_EQUALS("", d.getTagForChunk(BSON(ChunkFields::min(BSON("x" << 35)))));
+ ASSERT_EQUALS("", d.getTagForChunk(BSON(ChunkType::min(BSON("x" << -4)))));
+ ASSERT_EQUALS("", d.getTagForChunk(BSON(ChunkType::min(BSON("x" << 0)))));
+ ASSERT_EQUALS("a", d.getTagForChunk(BSON(ChunkType::min(BSON("x" << 1)))));
+ ASSERT_EQUALS("b", d.getTagForChunk(BSON(ChunkType::min(BSON("x" << 10)))));
+ ASSERT_EQUALS("b", d.getTagForChunk(BSON(ChunkType::min(BSON("x" << 15)))));
+ ASSERT_EQUALS("c", d.getTagForChunk(BSON(ChunkType::min(BSON("x" << 25)))));
+ ASSERT_EQUALS("", d.getTagForChunk(BSON(ChunkType::min(BSON("x" << 35)))));
}
/**
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index fbf80a4d5d0..8ae0e4b6db7 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -55,16 +55,16 @@ namespace mongo {
Chunk::Chunk(const ChunkManager * manager, BSONObj from)
: _manager(manager), _lastmod(0, OID()), _dataWritten(mkDataWritten())
{
- string ns = from.getStringField(ChunkFields::ns().c_str());
- _shard.reset(from.getStringField(ChunkFields::shard().c_str()));
+ string ns = from.getStringField(ChunkType::ns().c_str());
+ _shard.reset(from.getStringField(ChunkType::shard().c_str()));
- _lastmod = ShardChunkVersion::fromBSON(from[ChunkFields::lastmod()]);
+ _lastmod = ShardChunkVersion::fromBSON(from[ChunkType::DEPRECATED_lastmod()]);
verify( _lastmod.isSet() );
- _min = from.getObjectField(ChunkFields::min().c_str()).getOwned();
- _max = from.getObjectField(ChunkFields::max().c_str()).getOwned();
+ _min = from.getObjectField(ChunkType::min().c_str()).getOwned();
+ _max = from.getObjectField(ChunkType::max().c_str()).getOwned();
- _jumbo = from[ChunkFields::jumbo()].trueValue();
+ _jumbo = from[ChunkType::jumbo()].trueValue();
uassert( 10170 , "Chunk needs a ns" , ! ns.empty() );
uassert( 13327 , "Chunk ns must match server ns" , ns == _manager->getns() );
@@ -473,8 +473,8 @@ namespace mongo {
void Chunk::appendShortVersion( const char * name , BSONObjBuilder& b ) const {
BSONObjBuilder bb( b.subobjStart( name ) );
- bb.append(ChunkFields::min(), _min);
- bb.append(ChunkFields::max(), _max);
+ bb.append(ChunkType::min(), _min);
+ bb.append(ChunkType::max(), _max);
bb.done();
}
@@ -487,19 +487,19 @@ namespace mongo {
to.append( "_id" , genID( _manager->getns() , _min ) );
if ( myLastMod.isSet() ) {
- myLastMod.addToBSON(to, ChunkFields::lastmod());
+ myLastMod.addToBSON(to, ChunkType::DEPRECATED_lastmod());
}
else if ( _lastmod.isSet() ) {
- _lastmod.addToBSON(to, ChunkFields::lastmod());
+ _lastmod.addToBSON(to, ChunkType::DEPRECATED_lastmod());
}
else {
verify(0);
}
- to << ChunkFields::ns(_manager->getns());
- to << ChunkFields::min(_min);
- to << ChunkFields::max(_max);
- to << ChunkFields::shard(_shard.getName());
+ to << ChunkType::ns(_manager->getns());
+ to << ChunkType::min(_min);
+ to << ChunkType::max(_max);
+ to << ChunkType::shard(_shard.getName());
}
string Chunk::genID( const string& ns , const BSONObj& o ) {
@@ -517,11 +517,11 @@ namespace mongo {
string Chunk::toString() const {
stringstream ss;
- ss << ChunkFields::ns() << ":" << _manager->getns() <<
- ChunkFields::shard() << ": " << _shard.toString() <<
- ChunkFields::lastmod() << ": " << _lastmod.toString() <<
- ChunkFields::min() << ": " << _min <<
- ChunkFields::max() << ": " << _max;
+ ss << ChunkType::ns() << ":" << _manager->getns() <<
+ ChunkType::shard() << ": " << _shard.toString() <<
+ ChunkType::DEPRECATED_lastmod() << ": " << _lastmod.toString() <<
+ ChunkType::min() << ": " << _min <<
+ ChunkType::max() << ": " << _max;
return ss.str();
}
@@ -540,9 +540,9 @@ namespace mongo {
ScopedDbConnection::getInternalScopedDbConnection(
configServer.modelServer(), 30));
- conn->get()->update(ConfigNS::chunk,
- BSON(ChunkFields::name(genID())),
- BSON("$set" << BSON(ChunkFields::jumbo(true))));
+ conn->get()->update(ChunkType::ConfigNS,
+ BSON(ChunkType::name(genID())),
+ BSON("$set" << BSON(ChunkType::jumbo(true))));
conn->done();
}
catch ( DBException& e ) {
@@ -1002,7 +1002,7 @@ namespace mongo {
// Make sure we don't have any chunks that already exist here
unsigned long long existingChunks =
- conn->get()->count(ConfigNS::chunk, BSON(ChunkFields::ns(_ns)));
+ conn->get()->count(ChunkType::ConfigNS, BSON(ChunkType::ns(_ns)));
uassert( 13449 , str::stream() << "collection " << _ns << " already sharded with "
<< existingChunks << " chunks", existingChunks == 0 );
@@ -1017,8 +1017,8 @@ namespace mongo {
temp.serialize( chunkBuilder );
BSONObj chunkObj = chunkBuilder.obj();
- conn->get()->update(ConfigNS::chunk,
- QUERY(ChunkFields::name(temp.genID())),
+ conn->get()->update(ChunkType::ConfigNS,
+ QUERY(ChunkType::name(temp.genID())),
chunkObj,
true,
false );
@@ -1251,7 +1251,7 @@ namespace mongo {
// remove chunk data
scoped_ptr<ScopedDbConnection> conn(
ScopedDbConnection::getInternalScopedDbConnection( configServer.modelServer() ) );
- conn->get()->remove(ConfigNS::chunk, BSON(ChunkFields::ns(_ns)));
+ conn->get()->remove(ChunkType::ConfigNS, BSON(ChunkType::ns(_ns)));
conn->done();
LOG(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl;
diff --git a/src/mongo/s/chunk_diff.hpp b/src/mongo/s/chunk_diff.hpp
index b49f49a8d6b..319a426cbea 100644
--- a/src/mongo/s/chunk_diff.hpp
+++ b/src/mongo/s/chunk_diff.hpp
@@ -19,7 +19,7 @@
#pragma once
#include "mongo/s/chunk_diff.h"
-#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
namespace mongo {
@@ -89,7 +89,7 @@ namespace mongo {
// Open a cursor for the diff chunks
auto_ptr<DBClientCursor> cursor = conn->get()->query(
- ConfigNS::chunk, diffQuery, 0, 0, 0, 0, ( DEBUG_BUILD ? 2 : 1000000 ) );
+ ChunkType::ConfigNS, diffQuery, 0, 0, 0, 0, ( DEBUG_BUILD ? 2 : 1000000 ) );
verify( cursor.get() );
int diff = calculateConfigDiff( *cursor.get() );
@@ -130,11 +130,11 @@ namespace mongo {
BSONObj diffChunkDoc = diffCursor.next();
- ShardChunkVersion chunkVersion = ShardChunkVersion::fromBSON(diffChunkDoc, ChunkFields::lastmod());
+ ShardChunkVersion chunkVersion = ShardChunkVersion::fromBSON(diffChunkDoc, ChunkType::DEPRECATED_lastmod());
- if( diffChunkDoc[ChunkFields::min()].type() != Object ||
- diffChunkDoc[ChunkFields::max()].type() != Object ||
- diffChunkDoc[ChunkFields::shard()].type() != String )
+ if( diffChunkDoc[ChunkType::min()].type() != Object ||
+ diffChunkDoc[ChunkType::max()].type() != Object ||
+ diffChunkDoc[ChunkType::shard()].type() != String )
{
warning() << "got invalid chunk document " << diffChunkDoc
<< " when trying to load differing chunks" << endl;
@@ -157,15 +157,15 @@ namespace mongo {
if( chunkVersion > *_maxVersion ) *_maxVersion = chunkVersion;
// Chunk version changes
- ShardType shard = shardFor( diffChunkDoc[ChunkFields::shard()].String() );
+ ShardType shard = shardFor( diffChunkDoc[ChunkType::shard()].String() );
typename map<ShardType, ShardChunkVersion>::iterator shardVersionIt = _maxShardVersions->find( shard );
if( shardVersionIt == _maxShardVersions->end() || shardVersionIt->second < chunkVersion ){
(*_maxShardVersions)[ shard ] = chunkVersion;
}
// See if we need to remove any chunks we are currently tracking b/c of this chunk's changes
- removeOverlapping(diffChunkDoc[ChunkFields::min()].Obj(),
- diffChunkDoc[ChunkFields::max()].Obj());
+ removeOverlapping(diffChunkDoc[ChunkType::min()].Obj(),
+ diffChunkDoc[ChunkType::max()].Obj());
// Figure out which of the new chunks we need to track
// Important - we need to actually own this doc, in case the cursor decides to getMore or unbuffer
@@ -180,8 +180,8 @@ namespace mongo {
BSONObj chunkDoc = *it;
// Important - we need to make sure we actually own the min and max here
- BSONObj min = chunkDoc[ChunkFields::min()].Obj().getOwned();
- BSONObj max = chunkDoc[ChunkFields::max()].Obj().getOwned();
+ BSONObj min = chunkDoc[ChunkType::min()].Obj().getOwned();
+ BSONObj max = chunkDoc[ChunkType::max()].Obj().getOwned();
// Invariant enforced by sharding
// It's possible to read inconsistent state b/c of getMore() and yielding, so we want
@@ -218,7 +218,7 @@ namespace mongo {
if( rand() % 2 ) numStaleMinorClauses = maxMinorVersionClauses;
#endif
- queryB.append(ChunkFields::ns(), _ns);
+ queryB.append(ChunkType::ns(), _ns);
//
// If we have only a few minor versions to refresh, we can be more selective in our query
@@ -232,7 +232,7 @@ namespace mongo {
{
BSONObjBuilder queryNewB( queryOrB.subobjStart() );
{
- BSONObjBuilder ts(queryNewB.subobjStart(ChunkFields::lastmod()));
+ BSONObjBuilder ts(queryNewB.subobjStart(ChunkType::DEPRECATED_lastmod()));
// We should *always* pull at least a single chunk back, this lets us quickly
// detect if our collection was unsharded (and most of the time if it was
// resharded) in the meantime
@@ -249,9 +249,9 @@ namespace mongo {
for( typename map<ShardType, ShardChunkVersion>::const_iterator it = _maxShardVersions->begin(); it != _maxShardVersions->end(); it++ ){
BSONObjBuilder queryShardB( queryOrB.subobjStart() );
- queryShardB.append(ChunkFields::shard(), nameFrom( it->first ) );
+ queryShardB.append(ChunkType::shard(), nameFrom( it->first ) );
{
- BSONObjBuilder ts(queryShardB.subobjStart(ChunkFields::lastmod()));
+ BSONObjBuilder ts(queryShardB.subobjStart(ChunkType::DEPRECATED_lastmod()));
ts.appendTimestamp( "$gt", it->second.toLong() );
ts.done();
}
@@ -264,7 +264,7 @@ namespace mongo {
BSONObjBuilder queryShardB( queryOrB.subobjStart() );
{
- BSONObjBuilder ts(queryShardB.subobjStart(ChunkFields::lastmod()));
+ BSONObjBuilder ts(queryShardB.subobjStart(ChunkType::DEPRECATED_lastmod()));
ts.appendTimestamp( "$gt", it->toLong() );
ts.appendTimestamp( "$lt",
ShardChunkVersion( it->majorVersion() + 1, 0, OID() ).toLong() );
diff --git a/src/mongo/s/cluster_constants.cpp b/src/mongo/s/cluster_constants.cpp
index 5412d61fd9a..46873c1591b 100644
--- a/src/mongo/s/cluster_constants.cpp
+++ b/src/mongo/s/cluster_constants.cpp
@@ -45,17 +45,6 @@ namespace mongo {
BSONField<bool> CollectionFields::noBalance("noBalance");
BSONField<OID> CollectionFields::epoch("lastmodEpoch");
- const string ConfigNS::chunk = "config.chunks";
- BSONField<string> ChunkFields::name("_id");
- BSONField<string> ChunkFields::ns("ns");
- BSONField<BSONObj> ChunkFields::min("min");
- BSONField<BSONObj> ChunkFields::max("max");
- BSONField<string> ChunkFields::lastmod("lastmod");
- BSONField<string> ChunkFields::shard("shard");
- BSONField<bool> ChunkFields::jumbo("jumbo");
- BSONField<OID> ChunkFields::lastmodEpoch("lastmodEpoch");
- BSONField<BSONArray> ChunkFields::NEW_lastmod("lastmod");
-
const string ConfigNS::tag = "config.tags";
BSONField<string> TagFields::ns("ns");
BSONField<string> TagFields::tag("tag");
diff --git a/src/mongo/s/cluster_constants.h b/src/mongo/s/cluster_constants.h
index 32612a82553..aa37e90b8ef 100644
--- a/src/mongo/s/cluster_constants.h
+++ b/src/mongo/s/cluster_constants.h
@@ -86,27 +86,6 @@ namespace mongo {
};
/**
- * ChunkFields holds all the field names and types for the chunks collection.
- */
- struct ChunkFields {
- static BSONField<string> name; // chunk's id
- static BSONField<string> ns; // namespace this collection is in
- static BSONField<BSONObj> min; // first key of the chunk, including
- static BSONField<BSONObj> max; // last key of the chunk, non-including
- static BSONField<string> lastmod; // major | minor versions
- static BSONField<string> shard; // home of this chunk
- static BSONField<bool> jumbo; // too big to move?
-
- // Transition to new format, 2.2 -> 2.4
- // 2.2 can read both lastmod + lastmodEpoch format and 2.4 [ lastmod, OID ] formats.
- static BSONField<OID> lastmodEpoch; // OID, to disambiguate collection incarnations
-
- // Being added in 2.4
- // This will deprecate lastmod + lastmodEpoch format.
- static BSONField<BSONArray> NEW_lastmod; // [Date_t, OID] format
- };
-
- /**
* TagFields holds all the field names and types for the tags collection.
*/
struct TagFields {
diff --git a/src/mongo/s/commands_admin.cpp b/src/mongo/s/commands_admin.cpp
index c0043bf6309..b6ab051d986 100644
--- a/src/mongo/s/commands_admin.cpp
+++ b/src/mongo/s/commands_admin.cpp
@@ -28,7 +28,7 @@
#include "mongo/db/stats/counters.h"
#include "mongo/s/chunk.h"
#include "mongo/s/client_info.h"
-#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/stats.h"
@@ -1195,8 +1195,8 @@ namespace mongo {
// If the server has been completely drained, remove it from the ConfigDB.
// Check not only for chunks but also databases.
- BSONObj shardIDDoc = BSON(ChunkFields::shard(shardDoc[ShardFields::name()].str()));
- long long chunkCount = conn->count(ConfigNS::chunk, shardIDDoc);
+ BSONObj shardIDDoc = BSON(ChunkType::shard(shardDoc[ShardFields::name()].str()));
+ long long chunkCount = conn->count(ChunkType::ConfigNS, shardIDDoc);
long long dbCount = conn->count( ConfigNS::database , primaryDoc );
if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ) {
log() << "going to remove shard: " << s.getName() << endl;
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index d40a1b7a6ad..9426d781252 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -26,7 +26,7 @@
#include "mongo/db/pdfile.h"
#include "mongo/db/cmdline.h"
#include "mongo/s/chunk.h"
-#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/server.h"
@@ -318,12 +318,12 @@ namespace mongo {
if ( oldVersion.isSet() && ! forceReload ) {
scoped_ptr<ScopedDbConnection> conn( ScopedDbConnection::getInternalScopedDbConnection(
configServer.modelServer(), 30.0 ) );
- newest = conn->get()->findOne(ConfigNS::chunk,
- Query(BSON(ChunkFields::ns(ns))).sort(ChunkFields::lastmod(), -1));
+ newest = conn->get()->findOne(ChunkType::ConfigNS,
+ Query(BSON(ChunkType::ns(ns))).sort(ChunkType::DEPRECATED_lastmod(), -1));
conn->done();
if ( ! newest.isEmpty() ) {
- ShardChunkVersion v = ShardChunkVersion::fromBSON(newest, ChunkFields::lastmod());
+ ShardChunkVersion v = ShardChunkVersion::fromBSON(newest, ChunkType::DEPRECATED_lastmod());
if ( v.isEquivalentTo( oldVersion ) ) {
scoped_lock lk( _lock );
CollectionInfo& ci = _collections[ns];
@@ -354,7 +354,7 @@ namespace mongo {
if ( ci.isSharded() && ci.getCM() ) {
ShardChunkVersion currentVersion =
- ShardChunkVersion::fromBSON(newest, ChunkFields::lastmod());
+ ShardChunkVersion::fromBSON(newest, ChunkType::DEPRECATED_lastmod());
// Only reload if the version we found is newer than our own in the same
// epoch
@@ -941,17 +941,17 @@ namespace mongo {
}
// indexes
- conn->get()->ensureIndex(ConfigNS::chunk,
- BSON(ChunkFields::ns() << 1 << ChunkFields::min() << 1 ), true);
+ conn->get()->ensureIndex(ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << 1 << ChunkType::min() << 1 ), true);
- conn->get()->ensureIndex(ConfigNS::chunk,
- BSON(ChunkFields::ns() << 1 <<
- ChunkFields::shard() << 1 <<
- ChunkFields::min() << 1 ), true);
+ conn->get()->ensureIndex(ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << 1 <<
+ ChunkType::shard() << 1 <<
+ ChunkType::min() << 1 ), true);
- conn->get()->ensureIndex(ConfigNS::chunk,
- BSON(ChunkFields::ns() << 1 <<
- ChunkFields::lastmod() << 1 ), true );
+ conn->get()->ensureIndex(ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << 1 <<
+ ChunkType::DEPRECATED_lastmod() << 1 ), true );
conn->get()->ensureIndex(ConfigNS::shard, BSON(ShardFields::host() << 1), true);
diff --git a/src/mongo/s/config_migrate.cpp b/src/mongo/s/config_migrate.cpp
index 426f1b379d5..6d0fbaa5a70 100644
--- a/src/mongo/s/config_migrate.cpp
+++ b/src/mongo/s/config_migrate.cpp
@@ -27,7 +27,7 @@
#include "mongo/s/server.h"
#include "mongo/s/config.h"
#include "mongo/s/chunk.h"
-#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
namespace mongo {
@@ -171,19 +171,19 @@ namespace mongo {
{
unsigned num = 0;
map<string,BSONObj> chunks;
- auto_ptr<DBClientCursor> c = conn->query(ConfigNS::chunk, BSONObj());
+ auto_ptr<DBClientCursor> c = conn->query(ChunkType::ConfigNS, BSONObj());
while ( c->more() ) {
BSONObj x = c->next();
BSONObjBuilder b;
- string id = Chunk::genID(x[ChunkFields::ns()].String(),
- x[ChunkFields::min()].Obj() );
+ string id = Chunk::genID(x[ChunkType::ns()].String(),
+ x[ChunkType::min()].Obj() );
b.append( "_id" , id );
BSONObjIterator i(x);
while ( i.more() ) {
BSONElement e = i.next();
- if (strcmp(e.fieldName(), ChunkFields::name().c_str()) == 0)
+ if (strcmp(e.fieldName(), ChunkType::name().c_str()) == 0)
continue;
b.append( e );
}
@@ -196,9 +196,9 @@ namespace mongo {
verify( num == chunks.size() );
- conn->remove(ConfigNS::chunk , BSONObj());
+ conn->remove(ChunkType::ConfigNS , BSONObj());
for ( map<string,BSONObj>::iterator i=chunks.begin(); i!=chunks.end(); i++ ) {
- conn->insert(ConfigNS::chunk, i->second);
+ conn->insert(ChunkType::ConfigNS, i->second);
}
}
diff --git a/src/mongo/s/d_chunk_manager.cpp b/src/mongo/s/d_chunk_manager.cpp
index 8f53a9cc866..c30c16632cf 100644
--- a/src/mongo/s/d_chunk_manager.cpp
+++ b/src/mongo/s/d_chunk_manager.cpp
@@ -25,7 +25,7 @@
#include "mongo/db/clientcursor.h"
#include "mongo/db/instance.h"
#include "mongo/s/chunk_diff.h"
-#include "mongo/s/cluster_constants.h"
+#include "mongo/s/type_chunk.h"
namespace mongo {
@@ -40,8 +40,8 @@ namespace mongo {
SCMConfigDiffTracker( const string& currShard ) : _currShard( currShard ) {}
virtual bool isTracked( const BSONObj& chunkDoc ) const {
- return chunkDoc[ChunkFields::shard()].type() == String &&
- chunkDoc[ChunkFields::shard()].String() == _currShard;
+ return chunkDoc[ChunkType::shard()].type() == String &&
+ chunkDoc[ChunkType::shard()].String() == _currShard;
}
virtual BSONObj maxFrom( const BSONObj& val ) const {
@@ -125,7 +125,7 @@ namespace mongo {
// Need to do the query ourselves, since we may use direct conns to the db
Query query = differ.configDiffQuery();
- auto_ptr<DBClientCursor> cursor = conn->query(ConfigNS::chunk, query);
+ auto_ptr<DBClientCursor> cursor = conn->query(ChunkType::ConfigNS, query);
uassert( 16181, str::stream() << "could not initialize cursor to config server chunks collection for ns " << ns, cursor.get() );
@@ -193,10 +193,10 @@ namespace mongo {
ShardChunkVersion version;
while ( cursor->more() ) {
BSONObj d = cursor->next();
- _chunksMap.insert(make_pair(d[ChunkFields::min()].Obj().getOwned(),
- d[ChunkFields::max()].Obj().getOwned()));
+ _chunksMap.insert(make_pair(d[ChunkType::min()].Obj().getOwned(),
+ d[ChunkType::max()].Obj().getOwned()));
- ShardChunkVersion currVersion = ShardChunkVersion::fromBSON(d[ChunkFields::lastmod()]);
+ ShardChunkVersion currVersion = ShardChunkVersion::fromBSON(d[ChunkType::DEPRECATED_lastmod()]);
if ( currVersion > version ) {
version = currVersion;
}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 4f9eb259ae0..52c8e8690c8 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -63,6 +63,7 @@
#include "d_logic.h"
#include "config.h"
#include "chunk.h"
+#include "mongo/s/type_chunk.h"
using namespace std;
@@ -1000,12 +1001,12 @@ namespace mongo {
BSONObj x;
BSONObj currChunk;
try{
- x = conn->get()->findOne(ConfigNS::chunk,
- Query(BSON(ChunkFields::ns(ns)))
- .sort(BSON(ChunkFields::lastmod() << -1)));
+ x = conn->get()->findOne(ChunkType::ConfigNS,
+ Query(BSON(ChunkType::ns(ns)))
+ .sort(BSON(ChunkType::DEPRECATED_lastmod() << -1)));
- currChunk = conn->get()->findOne(ConfigNS::chunk,
- shardId.wrap(ChunkFields::name().c_str()));
+ currChunk = conn->get()->findOne(ChunkType::ConfigNS,
+ shardId.wrap(ChunkType::name().c_str()));
}
catch( DBException& e ){
errmsg = str::stream() << "aborted moveChunk because could not get chunk data from config server " << shardingState.getConfigServer() << causedBy( e );
@@ -1013,15 +1014,15 @@ namespace mongo {
return false;
}
- maxVersion = ShardChunkVersion::fromBSON(x, ChunkFields::lastmod());
- verify(currChunk[ChunkFields::shard()].type());
- verify(currChunk[ChunkFields::min()].type());
- verify(currChunk[ChunkFields::max()].type());
- myOldShard = currChunk[ChunkFields::shard()].String();
+ maxVersion = ShardChunkVersion::fromBSON(x, ChunkType::DEPRECATED_lastmod());
+ verify(currChunk[ChunkType::shard()].type());
+ verify(currChunk[ChunkType::min()].type());
+ verify(currChunk[ChunkType::max()].type());
+ myOldShard = currChunk[ChunkType::shard()].String();
conn->done();
- BSONObj currMin = currChunk[ChunkFields::min()].Obj();
- BSONObj currMax = currChunk[ChunkFields::max()].Obj();
+ BSONObj currMin = currChunk[ChunkType::min()].Obj();
+ BSONObj currMax = currChunk[ChunkType::max()].Obj();
if ( currMin.woCompare( min ) || currMax.woCompare( max ) ) {
errmsg = "boundaries are outdated (likely a split occurred)";
result.append( "currMin" , currMin );
@@ -1266,19 +1267,19 @@ namespace mongo {
BSONObjBuilder op;
op.append( "op" , "u" );
op.appendBool( "b" , false /* no upserting */ );
- op.append( "ns" , ConfigNS::chunk );
+ op.append( "ns" , ChunkType::ConfigNS );
BSONObjBuilder n( op.subobjStart( "o" ) );
- n.append(ChunkFields::name(), Chunk::genID(ns, min));
- myVersion.addToBSON(n, ChunkFields::lastmod());
- n.append(ChunkFields::ns(), ns);
- n.append(ChunkFields::min(), min);
- n.append(ChunkFields::max(), max);
- n.append(ChunkFields::shard(), toShard.getName());
+ n.append(ChunkType::name(), Chunk::genID(ns, min));
+ myVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod());
+ n.append(ChunkType::ns(), ns);
+ n.append(ChunkType::min(), min);
+ n.append(ChunkType::max(), max);
+ n.append(ChunkType::shard(), toShard.getName());
n.done();
BSONObjBuilder q( op.subobjStart( "o2" ) );
- q.append(ChunkFields::name(), Chunk::genID(ns, min));
+ q.append(ChunkType::name(), Chunk::genID(ns, min));
q.done();
updates.append( op.obj() );
@@ -1305,20 +1306,20 @@ namespace mongo {
BSONObjBuilder op;
op.append( "op" , "u" );
op.appendBool( "b" , false );
- op.append( "ns" , ConfigNS::chunk );
+ op.append( "ns" , ChunkType::ConfigNS );
nextVersion.incMinor(); // same as used on donateChunk
BSONObjBuilder n( op.subobjStart( "o" ) );
- n.append(ChunkFields::name(), Chunk::genID(ns, bumpMin));
- nextVersion.addToBSON(n, ChunkFields::lastmod());
- n.append(ChunkFields::ns(), ns);
- n.append(ChunkFields::min(), bumpMin);
- n.append(ChunkFields::max(), bumpMax);
- n.append(ChunkFields::shard(), fromShard.getName());
+ n.append(ChunkType::name(), Chunk::genID(ns, bumpMin));
+ nextVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod());
+ n.append(ChunkType::ns(), ns);
+ n.append(ChunkType::min(), bumpMin);
+ n.append(ChunkType::max(), bumpMax);
+ n.append(ChunkType::shard(), fromShard.getName());
n.done();
BSONObjBuilder q( op.subobjStart( "o2" ) );
- q.append(ChunkFields::name(), Chunk::genID(ns, bumpMin));
+ q.append(ChunkType::name(), Chunk::genID(ns, bumpMin));
q.done();
updates.append( op.obj() );
@@ -1337,13 +1338,13 @@ namespace mongo {
BSONArrayBuilder preCond( cmdBuilder.subarrayStart( "preCondition" ) );
{
BSONObjBuilder b;
- b.append("ns", ConfigNS::chunk);
- b.append("q", BSON("query" << BSON(ChunkFields::ns(ns)) <<
- "orderby" << BSON(ChunkFields::lastmod() << -1)));
+ b.append("ns", ChunkType::ConfigNS);
+ b.append("q", BSON("query" << BSON(ChunkType::ns(ns)) <<
+ "orderby" << BSON(ChunkType::DEPRECATED_lastmod() << -1)));
{
BSONObjBuilder bb( b.subobjStart( "res" ) );
// TODO: For backwards compatibility, we can't yet require an epoch here
- bb.appendTimestamp(ChunkFields::lastmod(), maxVersion.toLong());
+ bb.appendTimestamp(ChunkType::DEPRECATED_lastmod(), maxVersion.toLong());
bb.done();
}
preCond.append( b.obj() );
@@ -1392,12 +1393,12 @@ namespace mongo {
// look for the chunk in this shard whose version got bumped
// we assume that if that mod made it to the config, the applyOps was successful
- BSONObj doc = conn->get()->findOne(ConfigNS::chunk,
- Query(BSON(ChunkFields::ns(ns)))
- .sort(BSON(ChunkFields::lastmod() << -1)));
+ BSONObj doc = conn->get()->findOne(ChunkType::ConfigNS,
+ Query(BSON(ChunkType::ns(ns)))
+ .sort(BSON(ChunkType::DEPRECATED_lastmod() << -1)));
ShardChunkVersion checkVersion =
- ShardChunkVersion::fromBSON(doc[ChunkFields::lastmod()]);
+ ShardChunkVersion::fromBSON(doc[ChunkType::DEPRECATED_lastmod()]);
if ( checkVersion.isEquivalentTo( nextVersion ) ) {
log() << "moveChunk commit confirmed" << migrateLog;
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 96fab9a0cc8..1cbfcd1eab1 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -39,6 +39,7 @@
#include "chunk.h" // for static genID only
#include "config.h"
#include "d_logic.h"
+#include "mongo/s/type_chunk.h"
namespace mongo {
@@ -455,9 +456,9 @@ namespace mongo {
void ChunkInfo::appendShortVersion( const char * name , BSONObjBuilder& b ) const {
BSONObjBuilder bb( b.subobjStart( name ) );
- bb.append(ChunkFields::min(), min);
- bb.append(ChunkFields::max(), max);
- lastmod.addToBSON(bb, ChunkFields::lastmod());
+ bb.append(ChunkType::min(), min);
+ bb.append(ChunkType::max(), max);
+ lastmod.addToBSON(bb, ChunkType::DEPRECATED_lastmod());
bb.done();
}
@@ -589,24 +590,24 @@ namespace mongo {
ScopedDbConnection::getInternalScopedDbConnection(
shardingState.getConfigServer(), 30));
- BSONObj x = conn->get()->findOne(ConfigNS::chunk,
- Query(BSON(ChunkFields::ns(ns)))
- .sort(BSON(ChunkFields::lastmod() << -1)));
+ BSONObj x = conn->get()->findOne(ChunkType::ConfigNS,
+ Query(BSON(ChunkType::ns(ns)))
+ .sort(BSON(ChunkType::DEPRECATED_lastmod() << -1)));
- maxVersion = ShardChunkVersion::fromBSON(x, ChunkFields::lastmod());
+ maxVersion = ShardChunkVersion::fromBSON(x, ChunkType::DEPRECATED_lastmod());
BSONObj currChunk =
- conn->get()->findOne(ConfigNS::chunk,
- shardId.wrap(ChunkFields::name().c_str())).getOwned();
+ conn->get()->findOne(ChunkType::ConfigNS,
+ shardId.wrap(ChunkType::name().c_str())).getOwned();
- verify(currChunk[ChunkFields::shard()].type());
- verify(currChunk[ChunkFields::min()].type());
- verify(currChunk[ChunkFields::max()].type());
- shard = currChunk[ChunkFields::shard()].String();
+ verify(currChunk[ChunkType::shard()].type());
+ verify(currChunk[ChunkType::min()].type());
+ verify(currChunk[ChunkType::max()].type());
+ shard = currChunk[ChunkType::shard()].String();
conn->done();
- BSONObj currMin = currChunk[ChunkFields::min()].Obj();
- BSONObj currMax = currChunk[ChunkFields::max()].Obj();
+ BSONObj currMin = currChunk[ChunkType::min()].Obj();
+ BSONObj currMax = currChunk[ChunkType::max()].Obj();
if ( currMin.woCompare( min ) || currMax.woCompare( max ) ) {
errmsg = "chunk boundaries are outdated (likely a split occurred)";
result.append( "currMin" , currMin );
@@ -641,7 +642,7 @@ namespace mongo {
origChunk.min = currMin.getOwned();
origChunk.max = currMax.getOwned();
- origChunk.lastmod = ShardChunkVersion::fromBSON(currChunk[ChunkFields::lastmod()]);
+ origChunk.lastmod = ShardChunkVersion::fromBSON(currChunk[ChunkType::DEPRECATED_lastmod()]);
// since this could be the first call that enable sharding we also make sure to have the chunk manager up to date
shardingState.gotShardName( shard );
@@ -679,21 +680,21 @@ namespace mongo {
BSONObjBuilder op;
op.append( "op" , "u" );
op.appendBool( "b" , true );
- op.append( "ns" , ConfigNS::chunk );
+ op.append( "ns" , ChunkType::ConfigNS );
// add the modified (new) chunk information as the update object
BSONObjBuilder n( op.subobjStart( "o" ) );
- n.append(ChunkFields::name(), Chunk::genID(ns, startKey));
- myVersion.addToBSON(n, ChunkFields::lastmod());
- n.append(ChunkFields::ns(), ns);
- n.append(ChunkFields::min(), startKey);
- n.append(ChunkFields::max(), endKey);
- n.append(ChunkFields::shard(), shard);
+ n.append(ChunkType::name(), Chunk::genID(ns, startKey));
+ myVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod());
+ n.append(ChunkType::ns(), ns);
+ n.append(ChunkType::min(), startKey);
+ n.append(ChunkType::max(), endKey);
+ n.append(ChunkType::shard(), shard);
n.done();
// add the chunk's _id as the query part of the update statement
BSONObjBuilder q( op.subobjStart( "o2" ) );
- q.append(ChunkFields::name(), Chunk::genID(ns, startKey));
+ q.append(ChunkType::name(), Chunk::genID(ns, startKey));
q.done();
updates.append( op.obj() );
@@ -709,13 +710,13 @@ namespace mongo {
{
BSONArrayBuilder preCond( cmdBuilder.subarrayStart( "preCondition" ) );
BSONObjBuilder b;
- b.append("ns", ConfigNS::chunk);
- b.append("q", BSON("query" << BSON(ChunkFields::ns(ns)) <<
- "orderby" << BSON(ChunkFields::lastmod() << -1)));
+ b.append("ns", ChunkType::ConfigNS);
+ b.append("q", BSON("query" << BSON(ChunkType::ns(ns)) <<
+ "orderby" << BSON(ChunkType::DEPRECATED_lastmod() << -1)));
{
BSONObjBuilder bb( b.subobjStart( "res" ) );
// TODO: For backwards compatibility, we can't yet require an epoch here
- bb.appendTimestamp(ChunkFields::lastmod(), maxVersion.toLong());
+ bb.appendTimestamp(ChunkType::DEPRECATED_lastmod(), maxVersion.toLong());
bb.done();
}
preCond.append( b.obj() );
diff --git a/src/mongo/s/type_chunk.h b/src/mongo/s/type_chunk.h
index d55e994f2f3..e56f9f549dc 100644
--- a/src/mongo/s/type_chunk.h
+++ b/src/mongo/s/type_chunk.h
@@ -60,18 +60,18 @@ namespace mongo {
static const std::string ConfigNS;
// Field names and types in the chunk collection type.
- static BSONField<std::string> name;
- static BSONField<std::string> ns;
- static BSONField<BSONObj> min;
- static BSONField<BSONObj> max;
- static BSONField<BSONArray> version;
- static BSONField<std::string> shard;
- static BSONField<bool> jumbo;
+ static BSONField<std::string> name; // chunk's id
+ static BSONField<std::string> ns; // namespace this chunk is in
+ static BSONField<BSONObj> min; // first key of the chunk, including
+ static BSONField<BSONObj> max; // last key of the chunk, non-including
+ static BSONField<BSONArray> version; // [Date_t, OID]
+ static BSONField<std::string> shard; // home of this chunk
+ static BSONField<bool> jumbo; // too big to move?
// Transition to new format, 2.2 -> 2.4
// 2.2 can read both lastmod + lastmodEpoch format and 2.4 [ lastmod, OID ] formats.
- static BSONField<Date_t> DEPRECATED_lastmod; // major | minor versions
- static BSONField<OID> DEPRECATED_epoch; // disambiguates collection incarnations
+ static BSONField<Date_t> DEPRECATED_lastmod; // major | minor versions
+ static BSONField<OID> DEPRECATED_epoch; // OID, to disambiguate collection incarnations
//
// chunk type methods
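
The type_chunk.h hunk above is where the field set itself is defined: the 2.2-era lastmod/lastmodEpoch pair survives as DEPRECATED_lastmod/DEPRECATED_epoch, while the array-valued version field ([Date_t, OID]) is reserved for the 2.4 format. Callers in this patch keep reading and writing the deprecated pair, e.g. (a short sketch built from calls used elsewhere in the diff):

    // Read a chunk's version out of a config.chunks document
    ShardChunkVersion v = ShardChunkVersion::fromBSON(chunkDoc,
                                                      ChunkType::DEPRECATED_lastmod());

    // Write it back when building an update document
    BSONObjBuilder b;
    v.addToBSON(b, ChunkType::DEPRECATED_lastmod());
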
diff --git a/src/mongo/s/util.h b/src/mongo/s/util.h
index e18e8f02218..bbb51b43d83 100644
--- a/src/mongo/s/util.h
+++ b/src/mongo/s/util.h
@@ -237,7 +237,8 @@ namespace mongo {
if( prefixIn == "" && ! obj[ "version" ].eoo() ){
prefix = (string)"version";
}
- /// TODO: use ChunkFields::lastmod()
+ // TODO: use ChunkType::DEPRECATED_lastmod()
+ // NOTE: type_chunk.h includes this file
else if( prefixIn == "" && ! obj["lastmod"].eoo() ){
prefix = (string)"lastmod";
}