author    Randolph Tan <randolph@10gen.com>  2014-05-06 10:43:19 -0400
committer Randolph Tan <randolph@10gen.com>  2014-06-06 10:31:49 -0400
commit    0d5acb0e3a6b0f1cdf7f252aa9a13afb1e884848 (patch)
tree      761487842572c5f02b40cd0c7cdd1cf63aa596c3
parent    2453cec627bb8f6100980dea273ac9eb54ecd645 (diff)
download  mongo-0d5acb0e3a6b0f1cdf7f252aa9a13afb1e884848.tar.gz
SERVER-11256 improve handling of empty vs nonexistent CollectionMetadata
-rw-r--r--  jstests/sharding/multi_write_target.js      |  18
-rw-r--r--  jstests/sharding/shard3.js                  |  15
-rw-r--r--  jstests/sharding/ssv_nochunk.js             |  40
-rw-r--r--  jstests/sharding/update_immutable_fields.js |   5
-rw-r--r--  jstests/sharding/version1.js                |  27
-rw-r--r--  jstests/sharding/version2.js                |  19
-rw-r--r--  jstests/sharding/zero_shard_version.js      | 180
-rw-r--r--  src/mongo/dbtests/sharding.cpp              |  25
-rw-r--r--  src/mongo/s/chunk_diff-inl.cpp              |   2
-rw-r--r--  src/mongo/s/chunk_manager_targeter.cpp      |   4
-rw-r--r--  src/mongo/s/chunk_version.h                 |  14
-rw-r--r--  src/mongo/s/config.cpp                      |   2
-rw-r--r--  src/mongo/s/d_state.cpp                     |  98
-rw-r--r--  src/mongo/s/metadata_loader.cpp             |   2
-rw-r--r--  src/mongo/s/stale_exception.h               |   2
15 files changed, 298 insertions, 155 deletions
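The behavioral core of SERVER-11256 is the chunk_version.h change further down: hasCompatibleEpoch(), which treated an unset epoch as compatible with anything, is replaced by hasEqualEpoch(), which requires the two epochs to match exactly. The following is a minimal illustration of that before/after semantics in shell-style JavaScript; the authoritative logic is the C++ ChunkVersion class in the diff below, and isSet(), treating the all-zero OID as "unset", is only a stand-in for OID::isSet().

    // Illustration only -- the real check lives in src/mongo/s/chunk_version.h.
    // Epochs are OID hex strings here; the all-zero OID stands for "unset".
    function isSet(epoch) {
        return epoch !== '000000000000000000000000';
    }

    // Old check: an unset epoch was "compatible" with everything.
    function hasCompatibleEpoch(a, b) {
        return !(isSet(a) && isSet(b) && a !== b);
    }

    // New check: epochs must be exactly equal, so an unset epoch no longer
    // matches a set one.
    function hasEqualEpoch(a, b) {
        return a === b;
    }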
diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js
index 3fc528293c8..8796a6ea6b4 100644
--- a/jstests/sharding/multi_write_target.js
+++ b/jstests/sharding/multi_write_target.js
@@ -4,7 +4,7 @@
var options = { separateConfig : true };
-var st = new ShardingTest({ shards : 3, mongos : 1, other : options });
+var st = new ShardingTest({ shards : 3, mongos : 2, other : options });
st.stopBalancer();
var mongos = st.s0;
@@ -16,19 +16,22 @@ assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
assert( admin.runCommand({ shardCollection : coll + "", key : { skey : 1 } }).ok );
assert( admin.runCommand({ split : coll + "", middle : { skey : 0 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { skey : 100 } }).ok );
assert( admin.runCommand({ moveChunk : coll + "",
find : { skey : 0 },
to : shards[1]._id }).ok );
+assert( admin.runCommand({ moveChunk : coll + "",
+ find : { skey : 100 },
+ to : shards[2]._id }).ok );
st.printShardingStatus();
jsTest.log("Testing multi-update...");
// Put data on all shards
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 }));
-assert.writeOK(st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 }));
-// Data not in chunks
-assert.writeOK(st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 }));
+assert.writeOK(st.s0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 }));
+assert.writeOK(st.s0.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 }));
+assert.writeOK(st.s0.getCollection(coll.toString()).insert({ _id: 0, skey: 100, x: 1 }));
// Non-multi-update doesn't work without shard key
assert.writeError(coll.update({ x : 1 }, { $set : { updated : true } }, { multi : false }));
@@ -40,8 +43,9 @@ assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updated : tr
assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({ updated : true }));
assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updated : true }));
-// _id update works, and goes to all shards
-assert.writeOK(coll.update({ _id : 0 }, { $set : { updatedById : true } }, { multi : false }));
+// _id update works, and goes to all shards even on the stale mongos
+var staleColl = st.s1.getCollection('foo.bar');
+assert.writeOK(staleColl.update({ _id : 0 }, { $set : { updatedById : true } }, { multi : false }));
// Ensure _id update goes to *all* shards
assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updatedById : true }));
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 8674df81aca..8b36f809686 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -109,21 +109,6 @@ s.printCollectionInfo( "test.foo" , "after counts" );
assert.eq( 0 , primary.count() , "p count after drop" )
assert.eq( 0 , secondary.count() , "s count after drop" )
-// NOTE
-// the following bypasses the sharding layer and writes straight to the servers
-// this is not supported at all but we'd like to leave this backdoor for now
-primary.save( { num : 1 } );
-secondary.save( { num : 4 } );
-assert.eq( 1 , primary.count() , "p count after drop and save" )
-assert.eq( 1 , secondary.count() , "s count after drop and save " )
-
-print("*** makes sure that sharded access respects the drop command" );
-
-assert.isnull( a.findOne() , "lookup via mongos 'a' accessed dropped data" );
-assert.isnull( b.findOne() , "lookup via mongos 'b' accessed dropped data" );
-
-s.printCollectionInfo( "test.foo" , "after b findOne tests" );
-
print( "*** dropDatabase setup" )
s.printShardingStatus()
diff --git a/jstests/sharding/ssv_nochunk.js b/jstests/sharding/ssv_nochunk.js
deleted file mode 100644
index 7d3f8bfdbb2..00000000000
--- a/jstests/sharding/ssv_nochunk.js
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Tests setShardVersion, particularly on the case where mongos sends it to a
- * shard that does not have any chunks.
- */
-
-var st = new ShardingTest({ shards: 2, mongos: 2 });
-st.stopBalancer();
-
-var configDB = st.s.getDB('config');
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ movePrimary: 'test', to: 'shard0000' });
-configDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
-
-var testDB = st.s.getDB('test');
-
-assert.writeOK(testDB.user.insert({ x: 1 }));
-
-var doc = testDB.user.findOne();
-
-var testDB2 = st.s1.getDB('test');
-
-configDB.adminCommand({ moveChunk: 'test.user', find: { x: 0 }, to: 'shard0001' });
-
-assert.eq(1, testDB.user.find().itcount());
-assert.eq(1, testDB2.user.find().itcount());
-
-assert.eq(1, testDB.user.find({ x: 1 }).itcount());
-assert.eq(1, testDB2.user.find({ x: 1 }).itcount());
-
-var configDB2 = st.s1.getDB('config');
-configDB2.adminCommand({ moveChunk: 'test.user', find: { x: 0 }, to: 'shard0000' });
-
-assert.eq(1, testDB.user.find().itcount());
-assert.eq(1, testDB2.user.find().itcount());
-
-assert.eq(1, testDB.user.find({ x: 1 }).itcount());
-assert.eq(1, testDB2.user.find({ x: 1 }).itcount());
-
-st.stop();
-
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index 25fb489e39e..31c2cc37a6f 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -23,8 +23,9 @@ var getDirectShardedConn = function( st, collName ) {
var maxChunk = st.s0.getCollection( "config.chunks" )
.find({ ns : collName }).sort({ lastmod : -1 }).next();
- var ssvInitCmd = { setShardVersion : collName,
- configdb : configConnStr,
+ var ssvInitCmd = { setShardVersion : collName,
+ authoritative : true,
+ configdb : configConnStr,
serverID : mockServerId,
version : maxChunk.lastmod,
versionEpoch : maxChunk.lastmodEpoch };
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index a16ead34f56..3fd288cc1c4 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -26,15 +26,30 @@ assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._c
shard: "shard0000" , shardHost: "localhost:30000" } ) ,
"should have failed because version is config is 1|0" );
-assert.commandWorked( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB ,
- version : new NumberLong( 4294967296 ), // 1|0
- authoritative : true , shard: "shard0000" , shardHost: "localhost:30000" } ) ,
+var epoch = s.getDB('config').chunks.findOne().lastmodEpoch;
+assert.commandWorked( a.runCommand({ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new NumberLong( 4294967296 ), // 1|0
+ versionEpoch: epoch,
+ authoritative: true,
+ shard: "shard0000",
+ shardHost: "localhost:30000" }),
"should have worked" );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : "a" , version : 2 } ).ok == 0 , "A" );
+assert( a.runCommand({ setShardVersion: "alleyinsider.foo",
+ configdb: "a",
+ version: 2,
+ versionEpoch: epoch }).ok == 0, "A" );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 0 , "B" );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 1 } ).ok == 0 , "C" );
+assert( a.runCommand({ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: 2,
+ versionEpoch: epoch }).ok == 0, "B" );
+
+assert( a.runCommand({ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: 1,
+ versionEpoch: epoch }).ok == 0, "C" );
// the only way that setShardVersion passes is if the shard agrees with the version
// the shard takes its version from config directly
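The magic constant NumberLong(4294967296) in these setShardVersion calls is the 64-bit combined chunk version with major = 1 and minor = 0, i.e. the "1|0" in the comments (the shell sees the same value as a Timestamp, which is why version2.js reads .global.i and .global.getTime()). A small hypothetical helper, for illustration only and not part of the tests, that splits the combined value:

    // Hypothetical helper: split the combined 64-bit version used by
    // setShardVersion into its major/minor halves.
    function decodeCombinedVersion(combined) {
        var major = Math.floor(combined / 4294967296);  // high 32 bits
        var minor = combined % 4294967296;              // low 32 bits
        return { major: major, minor: minor };
    }

    printjson(decodeCombinedVersion(4294967296));  // { "major" : 1, "minor" : 0 }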
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 2944e5cdc45..6bad1d5a81c 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -13,9 +13,14 @@ a = s._connections[0].getDB( "admin" );
assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i, 0 );
assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i, 0 );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , authoritative : true ,
- version : new NumberLong( 4294967296 ), // 1|0
- shard: "shard0000" , shardHost: "localhost:30000" } ).ok == 1 );
+var fooEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.foo' }).lastmodEpoch;
+assert( a.runCommand({ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ authoritative: true,
+ version: new NumberLong( 4294967296 ), // 1|0
+ versionEpoch: fooEpoch,
+ shard: "shard0000",
+ shardHost: "localhost:30000" }).ok == 1 );
printjson( s.config.chunks.findOne() );
@@ -33,7 +38,13 @@ function simpleFindOne(){
return a2.getMongo().getDB( "alleyinsider" ).foo.findOne();
}
-assert.commandWorked( a2.runCommand( { "setShardVersion" : "alleyinsider.bar" , configdb : s._configDB , version : new NumberLong( 4294967296 ) , authoritative : true } ) , "setShardVersion bar temp");
+var barEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.bar' }).lastmodEpoch;
+assert.commandWorked( a2.runCommand({ setShardVersion: "alleyinsider.bar",
+ configdb: s._configDB,
+ version: new NumberLong( 4294967296 ),
+ versionEpoch: barEpoch,
+ authoritative: true }),
+ "setShardVersion bar temp" );
assert.throws( simpleFindOne , [] , "should complain about not in sharded mode 1" );
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
new file mode 100644
index 00000000000..44f7fcb0754
--- /dev/null
+++ b/jstests/sharding/zero_shard_version.js
@@ -0,0 +1,180 @@
+/**
+ * Tests the setShardVersion logic on the shard side, specifically when comparing
+ * against a major version of zero or incompatible epochs.
+ */
+
+var st = new ShardingTest({ shards: 2, mongos: 4 });
+st.stopBalancer();
+
+var testDB_s0 = st.s.getDB('test');
+testDB_s0.adminCommand({ enableSharding: 'test' });
+testDB_s0.adminCommand({ movePrimary: 'test', to: 'shard0001' });
+testDB_s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+
+var checkShardMajorVersion = function(conn, expectedVersion) {
+ var shardVersionInfo = conn.adminCommand({ getShardVersion: 'test.user' });
+ assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+};
+
+///////////////////////////////////////////////////////
+// Test shard with empty chunk
+// mongos versions: s0: 1|0|a
+
+// shard0: 0|0|a
+// shard1: 1|0|a, [-inf, inf)
+// mongos0: 1|0|a
+
+var testDB_s1 = st.s1.getDB('test');
+assert.writeOK(testDB_s1.user.insert({ x: 1 }));
+assert.commandWorked(testDB_s1.adminCommand({ moveChunk: 'test.user',
+ find: { x: 0 },
+ to: 'shard0000' }));
+
+// Official config:
+// shard0: 2|0|a, [-inf, inf)
+// shard1: 0|0|a
+//
+// Shard metadata:
+// shard0: 0|0|a
+// shard1: 0|0|a
+//
+// mongos0: 1|0|a
+
+checkShardMajorVersion(st.d0, 0);
+checkShardMajorVersion(st.d1, 0);
+
+// mongos0 still thinks that { x: 1 } belongs to shard0001, but should be able to
+// refresh its metadata correctly.
+assert.neq(null, testDB_s0.user.findOne({ x: 1 }));
+
+checkShardMajorVersion(st.d0, 2);
+checkShardMajorVersion(st.d1, 0);
+
+// Set mongos2 & mongos3 to version 2|0|a
+var testDB_s2 = st.s2.getDB('test');
+assert.neq(null, testDB_s2.user.findOne({ x: 1 }));
+
+var testDB_s3 = st.s3.getDB('test');
+assert.neq(null, testDB_s3.user.findOne({ x: 1 }));
+
+///////////////////////////////////////////////////////
+// Test unsharded collection
+// mongos versions: s0, s2, s3: 2|0|a
+
+testDB_s1.user.drop();
+assert.writeOK(testDB_s1.user.insert({ x: 10 }));
+
+// shard0: 0|0|0
+// shard1: 0|0|0
+// mongos0: 2|0|a
+
+checkShardMajorVersion(st.d0, 0);
+checkShardMajorVersion(st.d1, 0);
+
+// mongos0 still thinks { x: 10 } belongs to shard0000, but since the collection was
+// dropped, the query should be routed to the primary shard.
+assert.neq(null, testDB_s0.user.findOne({ x: 10 }));
+
+checkShardMajorVersion(st.d0, 0);
+checkShardMajorVersion(st.d1, 0);
+
+///////////////////////////////////////////////////////
+// Test 2 shards with 1 chunk
+// mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+
+testDB_s1.user.drop();
+testDB_s1.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+testDB_s1.adminCommand({ split: 'test.user', middle: { x: 0 }});
+
+// shard0: 0|0|b,
+// shard1: 1|1|b, [-inf, 0), [0, inf)
+
+testDB_s1.user.insert({ x: 1 });
+testDB_s1.user.insert({ x: -11 });
+assert.commandWorked(testDB_s1.adminCommand({ moveChunk: 'test.user',
+ find: { x: -1 },
+ to: 'shard0000' }));
+
+// Official config:
+// shard0: 2|0|b, [-inf, 0)
+// shard1: 2|1|b, [0, inf)
+//
+// Shard metadata:
+// shard0: 0|0|b
+// shard1: 2|1|b
+//
+// mongos2: 2|0|a
+
+checkShardMajorVersion(st.d0, 0);
+checkShardMajorVersion(st.d1, 2);
+
+// mongos2 still thinks that { x: 1 } belongs to shard0000, but should be able to
+// refresh its metadata correctly.
+assert.neq(null, testDB_s2.user.findOne({ x: 1 }));
+
+checkShardMajorVersion(st.d0, 0);
+checkShardMajorVersion(st.d1, 2);
+
+// Set shard metadata to 2|0|b
+assert.neq(null, testDB_s2.user.findOne({ x: -11 }));
+
+checkShardMajorVersion(st.d0, 2);
+checkShardMajorVersion(st.d1, 2);
+
+// Official config:
+// shard0: 2|0|b, [-inf, 0)
+// shard1: 2|1|b, [0, inf)
+//
+// Shard metadata:
+// shard0: 2|0|b
+// shard1: 2|1|b
+//
+// mongos3: 2|0|a
+
+// 4th mongos still thinks that { x: 1 } belongs to shard0000, but should be able to
+// refresh its metadata correctly.
+assert.neq(null, testDB_s3.user.findOne({ x: 1 }));
+
+///////////////////////////////////////////////////////
+// Test mongos thinks unsharded when it's actually sharded
+// mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+
+// Set mongos0 to version 0|0|0
+testDB_s0.user.drop();
+
+checkShardMajorVersion(st.d0, 0);
+checkShardMajorVersion(st.d1, 0);
+
+assert.eq(null, testDB_s0.user.findOne({ x: 1 }));
+
+// We also need to set mongos1 to version 0|0|0; otherwise it'll complain that the
+// collection is already sharded.
+assert.eq(null, testDB_s1.user.findOne({ x: 1 }));
+assert.commandWorked(testDB_s1.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
+testDB_s1.user.insert({ x: 1 });
+
+assert.commandWorked(testDB_s1.adminCommand({ moveChunk: 'test.user',
+ find: { x: 0 },
+ to: 'shard0000' }));
+
+// Official config:
+// shard0: 2|0|c, [-inf, inf)
+// shard1: 0|0|c
+//
+// Shard metadata:
+// shard0: 0|0|c
+// shard1: 0|0|c
+//
+// mongos0: 0|0|0
+
+checkShardMajorVersion(st.d0, 0);
+checkShardMajorVersion(st.d1, 0);
+
+// 1st mongos thinks that the collection is unsharded and will attempt to query the primary shard.
+assert.neq(null, testDB_s0.user.findOne({ x: 1 }));
+
+checkShardMajorVersion(st.d0, 2);
+checkShardMajorVersion(st.d1, 0);
+
+st.stop();
+
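The "major|minor|epoch" shorthand in the comments above (e.g. 2|0|b) maps onto what getShardVersion reports when run directly against a shard: the global field is a Timestamp whose time part is the major version (which is what checkShardMajorVersion asserts on) and whose increment is the minor version, while the epoch letter stands for the collection's epoch OID, which changes each time the collection is dropped and resharded. A small usage sketch, assuming conn is a direct connection to a mongod such as st.d0:

    // Sketch only: read a shard's cached version the same way
    // checkShardMajorVersion above does.
    var info = conn.adminCommand({ getShardVersion: 'test.user' });
    var major = info.global.getTime();  // the "2" in "2|0|b"
    var minor = info.global.i;          // the "0" in "2|0|b"
    print('shard thinks test.user is at ' + major + '|' + minor);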
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
index fd9f2011d81..3f4abe501c1 100644
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
@@ -32,10 +32,13 @@
#include "mongo/client/dbclientmockcursor.h"
#include "mongo/client/parallel.h"
+#include "mongo/dbtests/config_server_fixture.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/s/chunk_diff.h"
#include "mongo/s/chunk_version.h"
+#include "mongo/s/config.h"
#include "mongo/s/type_chunk.h"
+#include "mongo/s/type_collection.h"
namespace ShardingTests {
@@ -78,13 +81,6 @@ namespace ShardingTests {
class ChunkManagerTest : public ConnectionString::ConnectionHook {
public:
- class CustomDirectClient : public DBDirectClient {
- public:
- virtual ConnectionString::ConnectionType type() const {
- return ConnectionString::CUSTOM;
- }
- };
-
CustomDirectClient _client;
Shard _shard;
@@ -114,6 +110,7 @@ namespace ShardingTests {
client().ensureIndex( ChunkType::ConfigNS, // br
BSON( ChunkType::ns() << 1 << // br
ChunkType::DEPRECATED_lastmod() << 1 ) );
+ configServer.init("$dummy:1000");
}
virtual ~ChunkManagerTest() {
@@ -255,8 +252,18 @@ namespace ShardingTests {
ChunkType::DEPRECATED_lastmod());
// Make manager load existing chunks
- ChunkManagerPtr manager( new ChunkManager( collName(), ShardKeyPattern( BSON( "_id" << 1 ) ), false ) );
- ((ChunkManager*) manager.get())->loadExistingRanges( shard().getConnString() );
+ BSONObjBuilder collDocBuilder;
+ collDocBuilder << CollectionType::ns(collName());
+ collDocBuilder << CollectionType::keyPattern(BSON( "_id" << 1 ));
+ collDocBuilder << CollectionType::unique(false);
+ collDocBuilder << CollectionType::dropped(false);
+ collDocBuilder << CollectionType::DEPRECATED_lastmod(jsTime());
+ collDocBuilder << CollectionType::DEPRECATED_lastmodEpoch(version.epoch());
+
+ BSONObj collDoc(collDocBuilder.done());
+
+ ChunkManagerPtr manager( new ChunkManager(collDoc) );
+ const_cast<ChunkManager *>(manager.get())->loadExistingRanges(shard().getConnString());
ASSERT( manager->getVersion().epoch() == version.epoch() );
ASSERT( manager->getVersion().minorVersion() == ( numChunks - 1 ) );
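The unit test now seeds the ChunkManager from a collection document shaped like an entry in config.collections instead of a bare namespace/key-pattern pair. For reference, such a document looks roughly like the sketch below (shell JavaScript; the field names are my assumption of the BSON names behind the CollectionType accessors used above, not taken verbatim from the diff):

    // Sketch of a config.collections entry of the kind the ChunkManager
    // is now constructed from (field names assumed).
    var collDoc = {
        _id: 'foo.bar',               // namespace (CollectionType::ns)
        key: { _id: 1 },              // shard key pattern
        unique: false,
        dropped: false,
        lastmod: new Date(),          // DEPRECATED_lastmod
        lastmodEpoch: new ObjectId()  // DEPRECATED_lastmodEpoch
    };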
diff --git a/src/mongo/s/chunk_diff-inl.cpp b/src/mongo/s/chunk_diff-inl.cpp
index e89f0d85312..aa06349e8ec 100644
--- a/src/mongo/s/chunk_diff-inl.cpp
+++ b/src/mongo/s/chunk_diff-inl.cpp
@@ -153,7 +153,7 @@ namespace mongo {
continue;
}
- if( ! chunkVersion.isSet() || ! chunkVersion.hasCompatibleEpoch( currEpoch ) ){
+ if( ! chunkVersion.isSet() || ! chunkVersion.hasEqualEpoch( currEpoch ) ){
warning() << "got invalid chunk version " << chunkVersion << " in document " << diffChunkDoc
<< " when trying to load differing chunks at version "
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index 4b0ce033287..e07cdc83324 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -419,7 +419,7 @@ namespace mongo {
const ChunkVersion& shardVersionB ) {
// Collection may have been dropped
- if ( !shardVersionA.hasCompatibleEpoch( shardVersionB ) ) return CompareResult_Unknown;
+ if ( !shardVersionA.hasEqualEpoch( shardVersionB ) ) return CompareResult_Unknown;
// Zero shard versions are only comparable to themselves
if ( !shardVersionA.isSet() || !shardVersionB.isSet() ) {
@@ -568,7 +568,7 @@ namespace mongo {
}
else {
ChunkVersion& previouslyNotedVersion = it->second;
- if ( previouslyNotedVersion.hasCompatibleEpoch( remoteShardVersion )) {
+ if ( previouslyNotedVersion.hasEqualEpoch( remoteShardVersion )) {
if ( previouslyNotedVersion.isOlderThan( remoteShardVersion )) {
remoteShardVersion.cloneTo( &previouslyNotedVersion );
}
diff --git a/src/mongo/s/chunk_version.h b/src/mongo/s/chunk_version.h
index e5b27ec53fb..6ed2613f225 100644
--- a/src/mongo/s/chunk_version.h
+++ b/src/mongo/s/chunk_version.h
@@ -178,13 +178,13 @@ namespace mongo {
// Can we write to this data and not have a problem?
bool isWriteCompatibleWith( const ChunkVersion& otherVersion ) const {
- if( ! hasCompatibleEpoch( otherVersion ) ) return false;
+ if( ! hasEqualEpoch( otherVersion ) ) return false;
return otherVersion._major == _major;
}
// Is this the same version?
bool isEquivalentTo( const ChunkVersion& otherVersion ) const {
- if( ! hasCompatibleEpoch( otherVersion ) ) return false;
+ if( ! hasEqualEpoch( otherVersion ) ) return false;
return otherVersion._combined == _combined;
}
@@ -214,14 +214,12 @@ namespace mongo {
}
// Is this in the same epoch?
- bool hasCompatibleEpoch( const ChunkVersion& otherVersion ) const {
- return hasCompatibleEpoch( otherVersion._epoch );
+ bool hasEqualEpoch( const ChunkVersion& otherVersion ) const {
+ return hasEqualEpoch( otherVersion._epoch );
}
- bool hasCompatibleEpoch( const OID& otherEpoch ) const {
- // TODO : Change logic from eras are not-unequal to eras are equal
- if( otherEpoch.isSet() && _epoch.isSet() && otherEpoch != _epoch ) return false;
- return true;
+ bool hasEqualEpoch( const OID& otherEpoch ) const {
+ return _epoch == otherEpoch;
}
//
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index ce4e6597c03..516ef73f9f2 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -425,7 +425,7 @@ namespace mongo {
// Only reload if the version we found is newer than our own in the same
// epoch
if( currentVersion <= ci.getCM()->getVersion() &&
- ci.getCM()->getVersion().hasCompatibleEpoch( currentVersion ) )
+ ci.getCM()->getVersion().hasEqualEpoch( currentVersion ) )
{
return ci.getCM();
}
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 0219c865e45..bddaf05be79 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -1017,56 +1017,51 @@ namespace mongo {
const ChunkVersion globalVersion = shardingState.getVersion(ns);
oldVersion.addToBSON( result, "oldVersion" );
-
- if ( globalVersion.isSet() && version.isSet() ) {
- // this means there is no reset going on an either side
- // so its safe to make some assumptions
-
- if ( version.isWriteCompatibleWith( globalVersion ) ) {
- // mongos and mongod agree!
- if ( ! oldVersion.isWriteCompatibleWith( version ) ) {
- if ( oldVersion < globalVersion &&
- oldVersion.hasCompatibleEpoch(globalVersion) )
- {
- info->setVersion( ns , version );
- }
- else if ( authoritative ) {
- // this means there was a drop and our version is reset
- info->setVersion( ns , version );
- }
- else {
- result.append( "ns" , ns );
- result.appendBool( "need_authoritative" , true );
- errmsg = "verifying drop on '" + ns + "'";
- return false;
- }
+
+ if ( version.isWriteCompatibleWith( globalVersion )) {
+ // mongos and mongod agree!
+ if ( !oldVersion.isWriteCompatibleWith( version )) {
+ if ( oldVersion < globalVersion &&
+ oldVersion.hasEqualEpoch( globalVersion )) {
+ info->setVersion( ns, version );
+ }
+ else if ( authoritative ) {
+ // this means there was a drop and our version is reset
+ info->setVersion( ns, version );
+ }
+ else {
+ result.append( "ns", ns );
+ result.appendBool( "need_authoritative", true );
+ errmsg = "verifying drop on '" + ns + "'";
+ return false;
}
- return true;
}
-
- }
-
- // step 4
- if ( oldVersion.isSet() && ! globalVersion.isSet() ) {
- // this had been reset
- info->setVersion( ns , ChunkVersion( 0, OID() ) );
- }
-
- if ( ! version.isSet() && ! globalVersion.isSet() ) {
- // this connection is cleaning itself
- info->setVersion( ns , ChunkVersion( 0, OID() ) );
return true;
}
+ // step 4
// Cases below all either return OR fall-through to remote metadata reload.
- if ( version.isSet() || !globalVersion.isSet() ) {
+ const bool isDropRequested = !version.isSet() && globalVersion.isSet();
+
+ if (isDropRequested) {
+ if ( ! authoritative ) {
+ result.appendBool( "need_authoritative" , true );
+ result.append( "ns" , ns );
+ globalVersion.addToBSON( result, "globalVersion" );
+ errmsg = "dropping needs to be authoritative";
+ return false;
+ }
+ // Fall through to metadata reload below
+ }
+ else {
// Not Dropping
// TODO: Refactor all of this
- if ( version < oldVersion && version.hasCompatibleEpoch( oldVersion ) ) {
- errmsg = "this connection already had a newer version of collection '" + ns + "'";
+ if ( version < oldVersion && version.hasEqualEpoch( oldVersion ) ) {
+ errmsg = str::stream() << "this connection already had a newer version "
+ << "of collection '" << ns << "'";
result.append( "ns" , ns );
version.addToBSON( result, "newVersion" );
globalVersion.addToBSON( result, "globalVersion" );
@@ -1074,12 +1069,13 @@ namespace mongo {
}
// TODO: Refactor all of this
- if ( version < globalVersion && version.hasCompatibleEpoch( globalVersion ) ) {
+ if ( version < globalVersion && version.hasEqualEpoch( globalVersion ) ) {
while ( shardingState.inCriticalMigrateSection() ) {
log() << "waiting till out of critical section" << endl;
shardingState.waitTillNotInCriticalSection( 10 );
}
- errmsg = "shard global version for collection is higher than trying to set to '" + ns + "'";
+ errmsg = str::stream() << "shard global version for collection is higher "
+ << "than trying to set to '" << ns << "'";
result.append( "ns" , ns );
version.addToBSON( result, "version" );
globalVersion.addToBSON( result, "globalVersion" );
@@ -1088,8 +1084,8 @@ namespace mongo {
}
if ( ! globalVersion.isSet() && ! authoritative ) {
- // Needed b/c when the last chunk is moved off a shard, the version gets reset to zero, which
- // should require a reload.
+ // Needed b/c when the last chunk is moved off a shard,
+ // the version gets reset to zero, which should require a reload.
while ( shardingState.inCriticalMigrateSection() ) {
log() << "waiting till out of critical section" << endl;
shardingState.waitTillNotInCriticalSection( 10 );
@@ -1104,20 +1100,6 @@ namespace mongo {
// Fall through to metadata reload below
}
- else {
-
- // Dropping
-
- if ( ! authoritative ) {
- result.appendBool( "need_authoritative" , true );
- result.append( "ns" , ns );
- globalVersion.addToBSON( result, "globalVersion" );
- errmsg = "dropping needs to be authoritative";
- return false;
- }
-
- // Fall through to metadata reload below
- }
ChunkVersion currVersion;
Status status = shardingState.refreshMetadataIfNeeded( ns, version, &currVersion );
@@ -1299,7 +1281,7 @@ namespace mongo {
// Check epoch first, to send more meaningful message, since other parameters probably
// won't match either
- if( ! wanted.hasCompatibleEpoch( received ) ){
+ if( ! wanted.hasEqualEpoch( received ) ){
errmsg = str::stream() << "version epoch mismatch detected for " << ns << ", "
<< "the collection may have been dropped and recreated";
return false;
diff --git a/src/mongo/s/metadata_loader.cpp b/src/mongo/s/metadata_loader.cpp
index c463bd4a13b..a6dad3a8fdf 100644
--- a/src/mongo/s/metadata_loader.cpp
+++ b/src/mongo/s/metadata_loader.cpp
@@ -215,7 +215,7 @@ namespace mongo {
if ( oldMetadata ) {
// If our epochs are compatible, it's useful to use the old metadata for diffs
- if ( oldMetadata->getCollVersion().hasCompatibleEpoch( epoch ) ) {
+ if ( oldMetadata->getCollVersion().hasEqualEpoch( epoch ) ) {
fullReload = false;
dassert( oldMetadata->isValid() );
diff --git a/src/mongo/s/stale_exception.h b/src/mongo/s/stale_exception.h
index e91a243df60..c19acc10d7c 100644
--- a/src/mongo/s/stale_exception.h
+++ b/src/mongo/s/stale_exception.h
@@ -105,7 +105,7 @@ namespace mongo {
* true if this exception would require a full reload of config data to resolve
*/
bool requiresFullReload() const {
- return ! _received.hasCompatibleEpoch( _wanted ) ||
+ return ! _received.hasEqualEpoch( _wanted ) ||
_received.isSet() != _wanted.isSet();
}