author     Eliot Horowitz <eliot@10gen.com>    2010-08-02 14:50:32 -0400
committer  Eliot Horowitz <eliot@10gen.com>    2010-08-02 14:50:32 -0400
commit     16a8abcec1f612be9952d8f7c83fb1166cef515c (patch)
tree       682a2e847e2a0d41dbdc552b620ec7a3132adb0a
parent     d25088f9c313e1c0f33fe16dec618a92e1d529f2 (diff)
parent     4c243706fab61f8e6ea0d99e16cc4b2a1fb1e75a (diff)
Merge branch 'master' of github.com:mongodb/mongo
-rw-r--r--  client/dbclient.h              |  1
-rw-r--r--  client/parallel.cpp            |  2
-rw-r--r--  db/client.cpp                  |  8
-rw-r--r--  db/cloner.cpp                  |  3
-rw-r--r--  db/dbcommands_generic.cpp      | 16
-rw-r--r--  db/repl/health.cpp             | 27
-rw-r--r--  db/repl/manager.cpp            |  2
-rw-r--r--  db/repl/rs.h                   |  2
-rw-r--r--  db/repl/rs_initiate.cpp        |  2
-rw-r--r--  db/repl/rs_rollback.cpp        |  4
-rw-r--r--  jstests/replsets/replset1.js   | 39
-rw-r--r--  jstests/replsets/replset2.js   | 32
-rw-r--r--  s/chunk.cpp                    |  7
-rw-r--r--  s/config.cpp                   |  2
-rw-r--r--  s/d_migrate.cpp                |  1
-rw-r--r--  s/d_state.cpp                  |  1
-rw-r--r--  s/shard.cpp                    |  1
-rw-r--r--  scripting/engine.cpp           |  1
-rw-r--r--  shell/servers.js               | 19
-rw-r--r--  util/concurrency/task.cpp      |  7
20 files changed, 151 insertions, 26 deletions
diff --git a/client/dbclient.h b/client/dbclient.h
index 76c5d3014b8..5da4f84af22 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -847,6 +847,7 @@ namespace mongo {
}
/** uses QueryOption_Exhaust
+ use DBClientCursorBatchIterator if you want to process items in large blocks, perhaps to avoid granular locking and such.
*/
unsigned long long query( boost::function<void(const BSONObj&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
unsigned long long query( boost::function<void(DBClientCursorBatchIterator&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
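(Editor's note: the new comment refers to the batch-iterator overload declared on the last line of this hunk. A minimal usage sketch, not part of this commit -- the handler, the namespace, and the moreInCurrentBatch()/nextSafe() iterator methods are assumed here, following how cloner.cpp drives the iterator further down in this merge.)

    #include "client/dbclient.h"
    using namespace mongo;

    // sketch only: drain exhaust-mode results one batch at a time,
    // so each callback can do its work without per-document overhead
    void handleBatch( DBClientCursorBatchIterator &i ) {
        while( i.moreInCurrentBatch() ) {      // assumed iterator interface
            BSONObj o = i.nextSafe();
            // per-document work here; the whole batch is handled in one call
        }
    }

    void runExample( DBClientConnection &conn ) {
        conn.query( boost::function<void(DBClientCursorBatchIterator&)>( handleBatch ),
                    "test.foo", Query(), 0, QueryOption_Exhaust );
    }
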
diff --git a/client/parallel.cpp b/client/parallel.cpp
index d605ebd857e..a7568b2d83d 100644
--- a/client/parallel.cpp
+++ b/client/parallel.cpp
@@ -87,6 +87,8 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor =
conn->query( _ns , q , num , 0 , ( _fields.isEmpty() ? 0 : &_fields ) , _options , _batchSize == 0 ? 0 : _batchSize + skipLeft );
+
+ assert( cursor.get() );
if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ){
conn.done();
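(Editor's note: this assert is the same guard the merge adds in config.cpp, d_migrate.cpp, d_state.cpp, shard.cpp and engine.cpp below -- query() hands back an auto_ptr that can be empty on failure and was previously dereferenced unchecked. A minimal sketch of the pattern; the connection and namespace are purely illustrative.)

    #include "client/dbclient.h"
    using namespace mongo;
    using namespace std;

    void printFoo( DBClientConnection &conn ) {
        auto_ptr<DBClientCursor> cursor = conn.query( "test.foo", Query() );
        assert( cursor.get() );   // fail loudly rather than crash on a null dereference
        while ( cursor->more() ) {
            BSONObj o = cursor->next();
            cout << o.toString() << endl;
        }
    }
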
diff --git a/db/client.cpp b/db/client.cpp
index af83653c5f2..65c467a2dc0 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -387,7 +387,13 @@ namespace mongo {
tablecell( ss , co.opNum() );
tablecell( ss , co.active() );
- tablecell( ss , co.getLockType() );
+ {
+ int lt = co.getLockType();
+ if( lt == -1 ) tablecell(ss, "R");
+ else if( lt == 1 ) tablecell(ss, "W");
+ else
+ tablecell( ss , lt);
+ }
tablecell( ss , co.isWaitingForLock() );
if ( co.active() )
tablecell( ss , co.elapsedSeconds() );
diff --git a/db/cloner.cpp b/db/cloner.cpp
index 79d0c082e87..96890bf1417 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -172,9 +172,10 @@ namespace mongo {
f.context = r._context;
DBClientConnection *remote = dynamic_cast< DBClientConnection* >( conn.get() );
if ( remote ) {
- remote->query( boost::function<void(DBClientCursorBatchIterator &)>( f ), from_collection, query, 0, options );
+ remote->query( boost::function<void(DBClientCursorBatchIterator &)>( f ), from_collection, query, 0, options );
} else { // no exhaust mode for direct client, so we have this hack
auto_ptr<DBClientCursor> c = conn->query( from_collection, query, 0, 0, 0, options );
+ assert( c.get() );
while( c->more() ) {
DBClientCursorBatchIterator i( *c );
f( i );
diff --git a/db/dbcommands_generic.cpp b/db/dbcommands_generic.cpp
index 338dbd0f6ee..2d16c9f232b 100644
--- a/db/dbcommands_generic.cpp
+++ b/db/dbcommands_generic.cpp
@@ -67,7 +67,7 @@ namespace mongo {
/* for diagnostic / testing purposes. */
class CmdSleep : public Command {
public:
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return NONE; }
virtual bool adminOnly() const { return true; }
virtual bool logTheOp() {
return false;
@@ -76,11 +76,19 @@ namespace mongo {
return true;
}
virtual void help( stringstream& help ) const {
- help << "internal testing command. Makes db block (in a read lock) for 100 seconds";
+ help << "internal testing command. Makes db block (in a read lock) for 100 seconds\n";
+ help << "w:true write lock";
}
- CmdSleep() : Command("sleep") {}
+ CmdSleep() : Command("sleep") { }
bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- sleepsecs(100);
+ if( cmdObj.getBoolField("w") ) {
+ writelock lk("");
+ sleepsecs(100);
+ }
+ else {
+ readlock lk("");
+ sleepsecs(100);
+ }
return true;
}
} cmdSleep;
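(Editor's note: a sketch of how the new w option might be exercised from C++ driver code -- not part of this commit; the connection object is assumed, and the command remains admin-only and intended for internal testing.)

    #include "client/dbclient.h"
    using namespace mongo;

    // asks the server to block for ~100 seconds holding the write lock
    // (omit "w" or pass false to block under the read lock instead)
    void sleepWithWriteLock( DBClientConnection &conn ) {
        BSONObj info;
        conn.runCommand( "admin", BSON( "sleep" << 1 << "w" << true ), info );
    }
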
diff --git a/db/repl/health.cpp b/db/repl/health.cpp
index 5c233a6a7e7..35da9edcdc6 100644
--- a/db/repl/health.cpp
+++ b/db/repl/health.cpp
@@ -28,6 +28,7 @@
#include "../helpers/dblogger.h"
#include "connections.h"
#include "../../util/unittest.h"
+#include "../dbhelpers.h"
namespace mongo {
/* decls for connections.h */
@@ -63,7 +64,7 @@ namespace mongo {
return s.str();
}
- void Member::summarizeAsHtml(stringstream& s) const {
+ void Member::summarizeMember(stringstream& s) const {
s << tr();
{
stringstream u;
@@ -184,6 +185,10 @@ namespace mongo {
ScopedConn conn(m->fullName());
auto_ptr<DBClientCursor> c = conn->query(rsoplog, Query().sort("$natural",1), 20, 0, &fields);
+ if( c.get() == 0 ) {
+ ss << "couldn't query " << rsoplog;
+ return;
+ }
static const char *h[] = {"ts","optime", "h","op","ns","rest",0};
ss << "<style type=\"text/css\" media=\"screen\">"
@@ -211,6 +216,10 @@ namespace mongo {
}
else {
auto_ptr<DBClientCursor> c = conn->query(rsoplog, Query().sort("$natural",-1), 20, 0, &fields);
+ if( c.get() == 0 ) {
+ ss << "couldn't query [2] " << rsoplog;
+ return;
+ }
string x;
bo o = c->next();
otEnd = o["ts"]._opTime();
@@ -290,7 +299,7 @@ namespace mongo {
Member *m = head();
while( m ) {
stringstream s;
- m->summarizeAsHtml(s);
+ m->summarizeMember(s);
mp[m->hbinfo().id()] = s.str();
m = m->next();
}
@@ -298,6 +307,20 @@ namespace mongo {
for( map<int,string>::const_iterator i = mp.begin(); i != mp.end(); i++ )
s << i->second;
s << _table();
+
+ try {
+ readlocktry lk("local.replset.minvalid", 300);
+ if( lk.got() ) {
+ BSONObj mv;
+ if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
+ s << p( str::stream() << "minvalid: " << mv["ts"]._opTime().toString() );
+ }
+ }
+ else s << p(".");
+ }
+ catch(...) {
+ s << p("exception fetching minvalid?");
+ }
}
diff --git a/db/repl/manager.cpp b/db/repl/manager.cpp
index e372ec4a6f8..82a0f01e1ef 100644
--- a/db/repl/manager.cpp
+++ b/db/repl/manager.cpp
@@ -48,7 +48,7 @@ namespace mongo {
task::Server("rs Manager"), rs(_rs), busyWithElectSelf(false), _primary(NOPRIMARY)
{
}
-
+
Manager::~Manager() {
log() << "should never be called?" << rsLog;
rs->mgr = 0;
diff --git a/db/repl/rs.h b/db/repl/rs.h
index a6924fc6e23..09b766ccf2f 100644
--- a/db/repl/rs.h
+++ b/db/repl/rs.h
@@ -52,7 +52,7 @@ namespace mongo {
unsigned id() const { return _hbinfo.id(); }
bool potentiallyHot() const { return _config->potentiallyHot(); } // not arbiter, not priority 0
- void summarizeAsHtml(stringstream& s) const;
+ void summarizeMember(stringstream& s) const;
friend class ReplSetImpl;
private:
const ReplSetConfig::MemberCfg *_config; /* todo: when this changes??? */
diff --git a/db/repl/rs_initiate.cpp b/db/repl/rs_initiate.cpp
index 19a345bf4d1..70a1d2de5f8 100644
--- a/db/repl/rs_initiate.cpp
+++ b/db/repl/rs_initiate.cpp
@@ -166,7 +166,7 @@ namespace mongo {
bob members;
members.append("0", BSON( "_id" << 0 << "host" << HostAndPort::Me().toString() ));
for( unsigned i = 0; i < seeds.size(); i++ )
- members.append(bob::numStr(i), BSON( "_id" << i << "host" << seeds[i].toString()));
+ members.append(bob::numStr(i+1), BSON( "_id" << i+1 << "host" << seeds[i].toString()));
b.appendArray("members", members.obj());
configObj = b.obj();
log() << "replSet created this configuration for initiation : " << configObj.toString() << rsLog;
diff --git a/db/repl/rs_rollback.cpp b/db/repl/rs_rollback.cpp
index 42814101f76..dc0b89302d1 100644
--- a/db/repl/rs_rollback.cpp
+++ b/db/repl/rs_rollback.cpp
@@ -140,10 +140,10 @@ namespace mongo {
//auto_ptr<DBClientCursor> u = us->query(rsoplog, q, 0, 0, &fields, 0, 0);
- h.rbid = getRBID(them);
+ h.rbid = getRBID(them);
auto_ptr<DBClientCursor> t = them->query(rsoplog, q, 0, 0, &fields, 0, 0);
- if( !t->more() ) throw "remote oplog empty or unreadable";
+ if( t.get() == 0 || !t->more() ) throw "remote oplog empty or unreadable";
BSONObj ourObj = u.current();
OpTime ourTime = ourObj["ts"]._opTime();
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index bc6c20e874a..61f3d99d549 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -59,7 +59,6 @@ doTest = function( signal ) {
assert( master_id != new_master_id, "Old master shouldn't be equal to new master." );
-
{
// this may fail since it has to reconnect
try {
@@ -74,8 +73,44 @@ doTest = function( signal ) {
// Here's how to restart a node:
replTest.restart( master_id );
+ // Now let's write some documents to the new master
+ for(var i=0; i<1000; i++) {
+ new_master.getDB("bar").bar.save({a: i});
+ }
+ new_master.getDB("admin").runCommand({getlasterror: 1});
+
+ // Here's how to restart the old master node:
+ slave = replTest.restart( master_id );
+
+ // Now, let's make sure that the old master comes up as a slave
+ assert.soon(function() {
+ var res = slave.getDB("admin").runCommand({ismaster: 1});
+ printjson(res);
+ return res['ok'] == 1 && res['ismaster'] == false;
+ });
+
+ // And we need to make sure that the replset comes back up
+ assert.soon(function() {
+ var res = new_master.getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson( res );
+ return res.myState == 1;
+ });
+
+ // And that both slave nodes have all the updates
+ new_master = replTest.getMaster();
+ replTest.awaitReplication();
+
+ slaves = replTest.liveNodes.slaves;
+ assert( slaves.length == 2, "Expected 2 slaves but length was " + slaves.length );
+ slaves.forEach(function(slave) {
+ slave.setSlaveOk();
+ var count = slave.getDB("bar").runCommand({count: "bar"});
+ printjson( count );
+ assert( count.n == 1000 , slave + " expected 1000 but count was " + count.n);
+ });
+
// Shut down the set and finish the test.
replTest.stopSet( signal );
}
-doTest( 15 );
+// doTest( 15 );
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index da39662221e..a5f4c7f75f5 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -33,16 +33,40 @@ doTest = function( signal ) {
var testDB = "repl-test";
+ var failed = false;
var callGetLastError = function(w, timeout, db) {
var result = master.getDB(db).runCommand({getlasterror: 1, w: w, wtimeout: timeout});
printjson( result );
- assert( result['ok'] == 1, "getLastError with w=" + w + " failed");
+ if(result['ok'] != 1) {
+ print("FAILURE");
+ failed = true;
+ }
}
- // Test getlasterror with a simple insert
+ // Test getlasterror with multiple inserts
// TEST FAILS HERE
+ print("**** Try inserting a multiple records -- first insert ****")
master.getDB(testDB).foo.insert({n: 1});
- callGetLastError(3, 60000, testDB);
+ master.getDB(testDB).foo.insert({n: 2});
+ master.getDB(testDB).foo.insert({n: 3});
+ callGetLastError(3, 10000, testDB);
+
+ m1 = master.getDB(testDB).foo.findOne({n: 1});
+ printjson( m1 );
+ assert( m1['n'] == 1 , "Failed to save to master on multiple inserts");
+
+ var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
+ assert( s0['n'] == 1 , "Failed to replicate to slave 0 on multiple inserts");
+
+ var s1 = slaves[1].getDB(testDB).foo.findOne({n: 1});
+ assert( s1['n'] == 1 , "Failed to replicate to slave 1 on multiple inserts");
+
+
+ // Test getlasterror with a simple insert
+ print("**** Try inserting a single record ****")
+ master.getDB(testDB).dropDatabase();
+ master.getDB(testDB).foo.insert({n: 1});
+ callGetLastError(3, 10000, testDB);
m1 = master.getDB(testDB).foo.findOne({n: 1});
printjson( m1 );
@@ -74,6 +98,8 @@ doTest = function( signal ) {
verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
+ assert( failed == false, "Replication with getLastError failed. See errors." );
+
replTest.stopSet( signal );
}
diff --git a/s/chunk.cpp b/s/chunk.cpp
index cee7b0ea6cc..870e7aaa916 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -451,8 +451,11 @@ namespace mongo {
// not using regular count as this is more flexible and supports $min/$max
Query q = Query().minKey(_min).maxKey(_max);
- int n = conn->query(_manager->getns(), q, maxCount, 0, &fields)->itcount();
-
+ int n;
+ {
+ auto_ptr<DBClientCursor> c = conn->query(_manager->getns(), q, maxCount, 0, &fields);
+ n = c->itcount();
+ }
conn.done();
return n;
}
diff --git a/s/config.cpp b/s/config.cpp
index e6bb48870c4..50682c67cb2 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -228,6 +228,7 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor = conn->query( ShardNS::collection ,b.obj() );
+ assert( cursor.get() );
while ( cursor->more() ){
BSONObj o = cursor->next();
_collections[o["_id"].String()] = CollectionInfo( this , o );
@@ -488,6 +489,7 @@ namespace mongo {
ScopedDbConnection conn( _primary );
auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
+ assert( c.get() );
while ( c->more() ){
BSONObj o = c->next();
string name = o["_id"].valuestrsafe();
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index 12b6ee3ae0c..f027f8bc611 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -788,6 +788,7 @@ namespace mongo {
{ // 3. initial bulk clone
state = CLONE;
auto_ptr<DBClientCursor> cursor = conn->query( ns , Query().minKey( min ).maxKey( max ) , /* QueryOption_Exhaust */ 0 );
+ assert( cursor.get() );
while ( cursor->more() ){
BSONObj o = cursor->next();
{
diff --git a/s/d_state.cpp b/s/d_state.cpp
index 26e44a1fcdd..dd2fecef45d 100644
--- a/s/d_state.cpp
+++ b/s/d_state.cpp
@@ -184,6 +184,7 @@ namespace mongo {
}
auto_ptr<DBClientCursor> cursor = conn->query( "config.chunks" , Query(q).sort( "min" ) );
+ assert( cursor.get() );
if ( ! cursor->more() ){
if ( scoped.get() )
scoped->done();
diff --git a/s/shard.cpp b/s/shard.cpp
index 7879cc031dc..4ef68c0103c 100644
--- a/s/shard.cpp
+++ b/s/shard.cpp
@@ -33,6 +33,7 @@ namespace mongo {
{
ScopedDbConnection conn( configServer.getPrimary() );
auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , Query() );
+ assert( c.get() );
while ( c->more() ){
all.push_back( c->next().getOwned() );
}
diff --git a/scripting/engine.cpp b/scripting/engine.cpp
index 7b47a0fb2d1..9e20a3a031c 100644
--- a/scripting/engine.cpp
+++ b/scripting/engine.cpp
@@ -170,6 +170,7 @@ namespace mongo {
static DBClientBase * db = createDirectClient();
auto_ptr<DBClientCursor> c = db->query( coll , Query() );
+ assert( c.get() );
set<string> thisTime;
diff --git a/shell/servers.js b/shell/servers.js
index a4651353f92..08e8bfd6126 100644
--- a/shell/servers.js
+++ b/shell/servers.js
@@ -1199,6 +1199,7 @@ ReplSetTest.prototype.awaitReplication = function() {
this.getMaster();
latest = this.liveNodes.master.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next()['ts']['t']
+ print(latest);
this.attempt({context: this, timeout: 30000, desc: "awaiting replication"},
function() {
@@ -1214,9 +1215,14 @@ ReplSetTest.prototype.awaitReplication = function() {
}
slave.getDB("admin").getMongo().setSlaveOk();
- var log = slave.getDB("local")['replset.minvalid'];
- if(log.find().hasNext()) {
- synced == synced && log.find().next()['ts']['t'];
+ var log = slave.getDB("local")['oplog.rs'];
+ if(log.find({}).sort({'$natural': -1}).limit(1).hasNext()) {
+ var entry = log.find({}).sort({'$natural': -1}).limit(1).next();
+ printjson( entry );
+ var ts = entry['ts']['t'];
+ print("TS for " + slave + " is " + ts + " and latest is " + latest);
+ print("Oplog size for " + slave + " is " + log.count());
+ synced = (synced && (latest == ts));
}
else {
synced = false;
@@ -1245,8 +1251,11 @@ ReplSetTest.prototype.start = function( n , options , restart ){
print("Starting....");
print( o );
- if ( restart )
- return startMongoProgram.apply( null , o );
+ if ( restart ) {
+ this.nodes[n] = startMongoProgram.apply( null , o );
+ printjson(this.nodes);
+ return this.nodes[n];
+ }
else {
return startMongod.apply( null , o );
}
diff --git a/util/concurrency/task.cpp b/util/concurrency/task.cpp
index 7e410c81174..6102666654e 100644
--- a/util/concurrency/task.cpp
+++ b/util/concurrency/task.cpp
@@ -120,13 +120,16 @@ namespace mongo {
rq = false;
while( 1 ) {
lam f;
- {
+ try {
boost::mutex::scoped_lock lk(m);
while( d.empty() )
c.wait(lk);
f = d.front();
d.pop_front();
}
+ catch(...) {
+ log() << "ERROR exception in Server:doWork?" << endl;
+ }
try {
f();
if( rq ) {
@@ -135,6 +138,8 @@ namespace mongo {
}
} catch(std::exception& e) {
log() << "Server::doWork() exception " << e.what() << endl;
+ } catch(...) {
+ log() << "Server::doWork() unknown exception!" << endl;
}
}
}