author     Aaron <aaron@10gen.com>  2010-08-02 14:26:36 -0700
committer  Aaron <aaron@10gen.com>  2010-08-02 14:26:36 -0700
commit     45f9479547d6ca0f263e1d266914a2d113723e65 (patch)
tree       4c0357ccd4dec752d365bb5d1cf46a2773797d5f
parent     ead8bad5b2b2b95d6a63a40783bf3fdaa215ef65 (diff)
parent     dbc11f0fdfd943fbf7bdb21a9040be799a9b4f8e (diff)
Merge branch 'master' of github.com:mongodb/mongo
-rw-r--r--  bson/bson.cpp  34
-rw-r--r--  bson/bsondemo/bsondemo.vcxproj  1
-rw-r--r--  bson/bsondemo/bsondemo.vcxproj.filters  1
-rw-r--r--  client/dbclient.h  1
-rw-r--r--  client/parallel.cpp  2
-rw-r--r--  db/client.cpp  8
-rw-r--r--  db/client.h  2
-rw-r--r--  db/cloner.cpp  3
-rw-r--r--  db/cmdline.h  10
-rw-r--r--  db/db.cpp  6
-rw-r--r--  db/dbcommands.cpp  29
-rw-r--r--  db/dbcommands_generic.cpp  20
-rw-r--r--  db/jsobj.cpp  16
-rw-r--r--  db/lasterror.cpp  2
-rw-r--r--  db/lasterror.h  23
-rw-r--r--  db/oplog.cpp  2
-rw-r--r--  db/query.cpp  17
-rw-r--r--  db/repl.cpp  2
-rw-r--r--  db/repl/health.cpp  63
-rw-r--r--  db/repl/heartbeat.cpp  16
-rw-r--r--  db/repl/manager.cpp  4
-rw-r--r--  db/repl/replset_commands.cpp  27
-rw-r--r--  db/repl/rs.cpp  15
-rw-r--r--  db/repl/rs.h  2
-rw-r--r--  db/repl/rs_config.cpp  6
-rw-r--r--  db/repl/rs_initiate.cpp  6
-rw-r--r--  db/repl/rs_rollback.cpp  4
-rw-r--r--  db/repl_block.cpp  5
-rw-r--r--  db/replpair.h  4
-rw-r--r--  db/restapi.cpp  2
-rw-r--r--  jstests/replsets/replset2.js  9
-rw-r--r--  jstests/sharding/addshard1.js  34
-rw-r--r--  jstests/sharding/cursor1.js  17
-rw-r--r--  jstests/slowNightly/sharding_cursors1.js  71
-rw-r--r--  jstests/slowWeekly/conc_update.js  46
-rw-r--r--  s/chunk.cpp  11
-rw-r--r--  s/config.cpp  2
-rw-r--r--  s/cursors.cpp  50
-rw-r--r--  s/cursors.h  13
-rw-r--r--  s/d_migrate.cpp  1
-rw-r--r--  s/d_state.cpp  1
-rw-r--r--  s/dbgrid.vcxproj  2
-rwxr-xr-x  s/dbgrid.vcxproj.filters  6
-rw-r--r--  s/server.cpp  3
-rw-r--r--  s/shard.cpp  1
-rw-r--r--  s/strategy_shard.cpp  4
-rw-r--r--  scripting/engine.cpp  1
-rw-r--r--  shell/db.js  19
-rw-r--r--  shell/mongo_vstudio.cpp  4
-rw-r--r--  shell/msvc/mongo.vcxproj  1
-rw-r--r--  shell/msvc/mongo.vcxproj.filters  3
-rw-r--r--  shell/servers.js  1
-rw-r--r--  shell/utils.js  34
-rw-r--r--  util/background.cpp  19
-rw-r--r--  util/background.h  19
-rw-r--r--  util/concurrency/task.cpp  7
-rw-r--r--  util/message.cpp  2
-rw-r--r--  util/message_server.h  1
-rw-r--r--  util/message_server_port.cpp  4
59 files changed, 572 insertions, 147 deletions
diff --git a/bson/bson.cpp b/bson/bson.cpp
new file mode 100644
index 00000000000..944bae053e3
--- /dev/null
+++ b/bson/bson.cpp
@@ -0,0 +1,34 @@
+/* bson.cpp
+*/
+
+#include "util/builder.h"
+
+namespace mongo {
+
+ /* there is an inline-only subset of the bson library; however, for performance it is best if the grow_reallocate
+ function is not inlined. Thus grow_reallocate lives here. The idea is that this file is a very MINIMAL set of code
+ for use with the C++ BSON library, and that it does not pull in a lot of other code prerequisites.
+
+ bsondemo.cpp will compile and link with itself and this file only (no libs) -- that's the idea.
+
+ jsobj.cpp currently #include's this file, so don't include both bson.cpp and jsobj.cpp in your project
+ at the same time -- just use jsobj.cpp if you need all that...
+
+ This is interim and will evolve, but gets bsondemo.cpp compiling again sans libraries.
+ */
+
+ /* BufBuilder --------------------------------------------------------*/
+
+ void BufBuilder::grow_reallocate() {
+ int a = size * 2;
+ if ( a == 0 )
+ a = 512;
+ if ( l > a )
+ a = l + 16 * 1024;
+ if( a > 64 * 1024 * 1024 )
+ msgasserted(10000, "BufBuilder grow() > 64MB");
+ data = (char *) realloc(data, a);
+ size= a;
+ }
+
+}
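
For reference, grow_reallocate above uses a simple doubling policy: start at 512 bytes, double the current size, jump to the requested length plus 16KB when doubling is not enough, and assert past 64MB. A standalone sketch of that policy (illustrative names, not part of the patch):

    #include <cstdio>
    #include <cstdlib>

    // Sketch of the growth policy in BufBuilder::grow_reallocate above.
    // 'size' is the current capacity, 'l' is the length the caller needs.
    static int nextCapacity(int size, int l) {
        int a = size * 2;                 // double the current capacity
        if (a == 0) a = 512;              // first allocation starts at 512 bytes
        if (l > a) a = l + 16 * 1024;     // doubling was not enough: requested length plus 16KB slack
        if (a > 64 * 1024 * 1024) {       // hard cap, stands in for msgasserted(10000, ...)
            std::fprintf(stderr, "BufBuilder grow() > 64MB\n");
            std::abort();
        }
        return a;
    }

    int main() {
        // an empty builder that needs 700 bytes: 0 -> 512 is still too small,
        // so capacity becomes 700 + 16384 = 17084 bytes
        std::printf("%d\n", nextCapacity(0, 700));
        return 0;
    }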
diff --git a/bson/bsondemo/bsondemo.vcxproj b/bson/bsondemo/bsondemo.vcxproj
index 101947efbc3..41cf8f228f3 100644
--- a/bson/bsondemo/bsondemo.vcxproj
+++ b/bson/bsondemo/bsondemo.vcxproj
@@ -170,6 +170,7 @@
</Link>
</ItemDefinitionGroup>
<ItemGroup>
+ <ClCompile Include="..\bson.cpp" />
<ClCompile Include="bsondemo.cpp" />
</ItemGroup>
<ItemGroup>
diff --git a/bson/bsondemo/bsondemo.vcxproj.filters b/bson/bsondemo/bsondemo.vcxproj.filters
index b0b5aa26d8c..6642f379433 100644
--- a/bson/bsondemo/bsondemo.vcxproj.filters
+++ b/bson/bsondemo/bsondemo.vcxproj.filters
@@ -2,6 +2,7 @@
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ClCompile Include="bsondemo.cpp" />
+ <ClCompile Include="..\bson.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\ordering.h">
diff --git a/client/dbclient.h b/client/dbclient.h
index 76c5d3014b8..5da4f84af22 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -847,6 +847,7 @@ namespace mongo {
}
/** uses QueryOption_Exhaust
+ use DBClientCursorBatchIterator if you want to process items in large blocks, perhaps to avoid granular locking and such.
*/
unsigned long long query( boost::function<void(const BSONObj&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
unsigned long long query( boost::function<void(DBClientCursorBatchIterator&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
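
The comment added above points exhaust-mode callers at the DBClientCursorBatchIterator overload, which hands the callback a whole batch at a time instead of one object per call. A hedged usage sketch (moreInCurrentBatch() and nextSafe() are assumed members of the iterator; they are not shown in this patch):

    #include <iostream>
    #include <boost/function.hpp>
    #include "client/dbclient.h"   // header path as used elsewhere in this tree

    // Illustrative callback for the batch-oriented exhaust query overload above.
    static void handleBatch( mongo::DBClientCursorBatchIterator& i ) {
        int n = 0;
        while ( i.moreInCurrentBatch() ) {   // assumed API, see note above
            mongo::BSONObj o = i.nextSafe();
            n++;
        }
        std::cout << "processed a batch of " << n << " documents" << std::endl;
    }

    // usage sketch:
    //   mongo::DBClientConnection c;
    //   c.connect( "localhost" );
    //   c.query( boost::function<void(mongo::DBClientCursorBatchIterator&)>( handleBatch ),
    //            "test.foo", mongo::Query(), 0, mongo::QueryOption_Exhaust );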
diff --git a/client/parallel.cpp b/client/parallel.cpp
index d605ebd857e..a7568b2d83d 100644
--- a/client/parallel.cpp
+++ b/client/parallel.cpp
@@ -87,6 +87,8 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor =
conn->query( _ns , q , num , 0 , ( _fields.isEmpty() ? 0 : &_fields ) , _options , _batchSize == 0 ? 0 : _batchSize + skipLeft );
+
+ assert( cursor.get() );
if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ){
conn.done();
diff --git a/db/client.cpp b/db/client.cpp
index af83653c5f2..65c467a2dc0 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -387,7 +387,13 @@ namespace mongo {
tablecell( ss , co.opNum() );
tablecell( ss , co.active() );
- tablecell( ss , co.getLockType() );
+ {
+ int lt = co.getLockType();
+ if( lt == -1 ) tablecell(ss, "R");
+ else if( lt == 1 ) tablecell(ss, "W");
+ else
+ tablecell( ss , lt);
+ }
tablecell( ss , co.isWaitingForLock() );
if ( co.active() )
tablecell( ss , co.elapsedSeconds() );
diff --git a/db/client.h b/db/client.h
index 5b6a5c37ded..2456d7fc90e 100644
--- a/db/client.h
+++ b/db/client.h
@@ -214,6 +214,8 @@ namespace mongo {
*/
bool shutdown();
+
+ /* this is for map/reduce writes */
bool isGod() const { return _god; }
friend class CurOp;
diff --git a/db/cloner.cpp b/db/cloner.cpp
index 79d0c082e87..96890bf1417 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -172,9 +172,10 @@ namespace mongo {
f.context = r._context;
DBClientConnection *remote = dynamic_cast< DBClientConnection* >( conn.get() );
if ( remote ) {
- remote->query( boost::function<void(DBClientCursorBatchIterator &)>( f ), from_collection, query, 0, options );
+ remote->query( boost::function<void(DBClientCursorBatchIterator &)>( f ), from_collection, query, 0, options );
} else { // no exhaust mode for direct client, so we have this hack
auto_ptr<DBClientCursor> c = conn->query( from_collection, query, 0, 0, 0, options );
+ assert( c.get() );
while( c->more() ) {
DBClientCursorBatchIterator i( *c );
f( i );
diff --git a/db/cmdline.h b/db/cmdline.h
index 853e20f0709..ef1bd57ab7f 100644
--- a/db/cmdline.h
+++ b/db/cmdline.h
@@ -28,7 +28,15 @@ namespace mongo {
string bind_ip; // --bind_ip
bool rest; // --rest
- string replSet; // --replSet <seedlist>
+ string _replSet; // --replSet[/<seedlist>]
+ string ourSetName() const {
+ string setname;
+ size_t sl = _replSet.find('/');
+ if( sl == string::npos )
+ return _replSet;
+ return _replSet.substr(0, sl);
+ }
+
string source; // --source
string only; // --only
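
ourSetName() simply takes everything before the first '/', so --replSet now accepts either a bare set name or <setname>/<seedlist>. A standalone sketch of the same parse (illustrative names):

    #include <iostream>
    #include <string>

    // Same idea as CmdLine::ourSetName(): the set name is whatever precedes the
    // optional '/<seedlist>' suffix of the --replSet argument.
    static std::string setNameOf(const std::string& replSetArg) {
        std::string::size_type slash = replSetArg.find('/');
        if (slash == std::string::npos)
            return replSetArg;                 // "--replSet foo"
        return replSetArg.substr(0, slash);    // "--replSet foo/h1:27017,h2:27017"
    }

    int main() {
        std::cout << setNameOf("foo") << "\n";                    // foo
        std::cout << setNameOf("foo/h1:27017,h2:27017") << "\n";  // foo
        return 0;
    }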
diff --git a/db/db.cpp b/db/db.cpp
index f53addcfd7a..2d03e9cb92f 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -580,9 +580,9 @@ sendmore:
snapshotThread.go();
clientCursorMonitor.go();
- if( !cmdLine.replSet.empty() ) {
+ if( !cmdLine._replSet.empty() ) {
replSet = true;
- ReplSetCmdline *replSetCmdline = new ReplSetCmdline(cmdLine.replSet);
+ ReplSetCmdline *replSetCmdline = new ReplSetCmdline(cmdLine._replSet);
boost::thread t( boost::bind( &startReplSets, replSetCmdline) );
}
@@ -920,7 +920,7 @@ int main(int argc, char* argv[], char *envp[] )
}
if (params.count("replSet")) {
/* seed list of hosts for the repl set */
- cmdLine.replSet = params["replSet"].as<string>().c_str();
+ cmdLine._replSet = params["replSet"].as<string>().c_str();
}
if (params.count("only")) {
cmdLine.only = params["only"].as<string>().c_str();
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 76dab6d60be..582b04b8993 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -1707,6 +1707,35 @@ namespace mongo {
} dbhashCmd;
+ /* for diagnostic / testing purposes. */
+ class CmdSleep : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help( stringstream& help ) const {
+ help << "internal testing command. Makes db block (in a read lock) for 100 seconds\n";
+ help << "w:true write lock";
+ }
+ CmdSleep() : Command("sleep") { }
+ bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( cmdObj.getBoolField("w") ) {
+ writelock lk("");
+ sleepsecs(100);
+ }
+ else {
+ readlock lk("");
+ sleepsecs(100);
+ }
+ return true;
+ }
+ } cmdSleep;
+
class AvailableQueryOptions : public Command {
public:
AvailableQueryOptions() : Command( "availablequeryoptions" ){}
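
The relocated sleep command now honors a w:true field, holding the write lock rather than a read lock for its 100 seconds. A hedged sketch of invoking it from a C++ client (assumes the usual DBClientConnection::runCommand signature; illustration only):

    #include <iostream>
    #include "client/dbclient.h"   // header path as used elsewhere in this tree

    // Blocks the server for ~100s; diagnostic/testing use only. The command is
    // adminOnly, so it has to run against the admin database.
    void blockServerForTesting( mongo::DBClientConnection& c ) {
        mongo::BSONObj info;
        // { sleep: 1, w: true } -> hold the write lock; omit "w" for a read lock
        bool ok = c.runCommand( "admin", BSON( "sleep" << 1 << "w" << true ), info );
        if ( !ok )
            std::cout << "sleep command failed: " << info.toString() << std::endl;
    }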
diff --git a/db/dbcommands_generic.cpp b/db/dbcommands_generic.cpp
index 338dbd0f6ee..627439483a8 100644
--- a/db/dbcommands_generic.cpp
+++ b/db/dbcommands_generic.cpp
@@ -64,26 +64,6 @@ namespace mongo {
}
} cmdBuildInfo;
- /* for diagnostic / testing purposes. */
- class CmdSleep : public Command {
- public:
- virtual LockType locktype() const { return READ; }
- virtual bool adminOnly() const { return true; }
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() const {
- return true;
- }
- virtual void help( stringstream& help ) const {
- help << "internal testing command. Makes db block (in a read lock) for 100 seconds";
- }
- CmdSleep() : Command("sleep") {}
- bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- sleepsecs(100);
- return true;
- }
- } cmdSleep;
/* just to check if the db has asserted */
class CmdAssertInfo : public Command {
diff --git a/db/jsobj.cpp b/db/jsobj.cpp
index e5745867375..feaa28d50c4 100644
--- a/db/jsobj.cpp
+++ b/db/jsobj.cpp
@@ -40,6 +40,8 @@ BOOST_STATIC_ASSERT( sizeof(double) == 8 );
BOOST_STATIC_ASSERT( sizeof(mongo::Date_t) == 8 );
BOOST_STATIC_ASSERT( sizeof(mongo::OID) == 12 );
+#include "../bson/bson.cpp"
+
namespace mongo {
BSONElement nullElement;
@@ -1006,20 +1008,6 @@ namespace mongo {
eb.done();
}
- /* BufBuilder --------------------------------------------------------*/
-
- void BufBuilder::grow_reallocate() {
- int a = size * 2;
- if ( a == 0 )
- a = 512;
- if ( l > a )
- a = l + 16 * 1024;
- if( a > 64 * 1024 * 1024 )
- msgasserted(10000, "BufBuilder grow() > 64MB");
- data = (char *) realloc(data, a);
- size= a;
- }
-
/*-- test things ----------------------------------------------------*/
#pragma pack(1)
diff --git a/db/lasterror.cpp b/db/lasterror.cpp
index a1de4c63d03..9fc5512e072 100644
--- a/db/lasterror.cpp
+++ b/db/lasterror.cpp
@@ -37,7 +37,7 @@ namespace mongo {
/* might be intentional (non-user thread) */
OCCASIONALLY DEV if( !isShell ) log() << "warning dev: lastError==0 won't report:" << msg << endl;
} else if ( le->disabled ) {
- log() << "lastError disabled, can't report: " << msg << endl;
+ log() << "lastError disabled, can't report: " << code << ":" << msg << endl;
} else {
le->raiseError(code, msg);
}
diff --git a/db/lasterror.h b/db/lasterror.h
index 6c1feacdfb7..03ea6fcd4f4 100644
--- a/db/lasterror.h
+++ b/db/lasterror.h
@@ -71,6 +71,27 @@ namespace mongo {
writebackId.clear();
}
void appendSelf( BSONObjBuilder &b );
+
+ struct Disabled : boost::noncopyable {
+ Disabled( LastError * le ){
+ _le = le;
+ if ( _le ){
+ _prev = _le->disabled;
+ _le->disabled = true;
+ } else {
+ _prev = false;
+ }
+ }
+
+ ~Disabled(){
+ if ( _le )
+ _le->disabled = _prev;
+ }
+
+ LastError * _le;
+ bool _prev;
+ };
+
static LastError noError;
};
@@ -121,7 +142,7 @@ namespace mongo {
static mongo::mutex _idsmutex;
map<int,Status> _ids;
} lastError;
-
+
void raiseError(int code , const char *msg);
} // namespace mongo
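
The new LastError::Disabled type is a small RAII guard: it records the current disabled flag, sets it for the enclosing scope, and restores it in the destructor, which is how Chunk::splitIfShould later in this patch keeps internal queries from clobbering the client's getLastError state. A generic sketch of the same pattern (illustrative names):

    #include <iostream>

    // Generic version of the guard added as LastError::Disabled: flip a flag for
    // the lifetime of a scope and restore the previous value on exit.
    class ScopedFlag {
    public:
        explicit ScopedFlag(bool* flag) : _flag(flag), _prev(flag ? *flag : false) {
            if (_flag) *_flag = true;
        }
        ~ScopedFlag() {
            if (_flag) *_flag = _prev;   // restored even if the scope exits via exception
        }
    private:
        bool* _flag;
        bool  _prev;
    };

    int main() {
        bool disabled = false;
        {
            ScopedFlag guard(&disabled);                   // like LastError::Disabled d( lastError.get() );
            std::cout << "inside: " << disabled << "\n";   // 1
        }
        std::cout << "after: " << disabled << "\n";        // 0
        return 0;
    }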
diff --git a/db/oplog.cpp b/db/oplog.cpp
index 109b0fe75d5..4ad4ca97b12 100644
--- a/db/oplog.cpp
+++ b/db/oplog.cpp
@@ -300,7 +300,7 @@ namespace mongo {
const char * ns = "local.oplog.$main";
- bool rs = !cmdLine.replSet.empty();
+ bool rs = !cmdLine._replSet.empty();
if( rs )
ns = rsoplog;
diff --git a/db/query.cpp b/db/query.cpp
index 0f51f5cb0b2..814c77663cc 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -35,6 +35,7 @@
#include "queryoptimizer.h"
#include "lasterror.h"
#include "../s/d_logic.h"
+#include "repl_block.h"
namespace mongo {
@@ -715,6 +716,7 @@ namespace mongo {
}
}
else {
+
if ( _pq.returnKey() ){
BSONObjBuilder bb( _buf );
bb.appendKeys( _c->indexKeyPattern() , _c->currKey() );
@@ -723,6 +725,13 @@ namespace mongo {
else {
BSONObj js = _c->current();
assert( js.isValid() );
+
+ if ( _oplogReplay ){
+ BSONElement e = js["ts"];
+ if ( e.type() == Date || e.type() == Timestamp )
+ _slaveReadTill = e._opTime();
+ }
+
fillQueryResultFromObj( _buf , _pq.getFields() , js , (_pq.showDiskLoc() ? &cl : 0));
}
_n++;
@@ -778,6 +787,7 @@ namespace mongo {
} else {
setComplete();
}
+
}
void finishExplain( const BSONObj &suffix ) {
@@ -811,6 +821,11 @@ namespace mongo {
bool saveClientCursor() const { return _saveClientCursor; }
bool wouldSaveClientCursor() const { return _wouldSaveClientCursor; }
+ void finishForOplogReplay( ClientCursor * cc ){
+ if ( _oplogReplay && ! _slaveReadTill.isNull() )
+ cc->_slaveReadTill = _slaveReadTill;
+
+ }
private:
BufBuilder _buf;
const ParsedQuery& _pq;
@@ -842,6 +857,7 @@ namespace mongo {
Message &_response;
ExplainBuilder &_eb;
CurOp &_curop;
+ OpTime _slaveReadTill;
};
/* run a query -- includes checking for and running a Command */
@@ -1039,6 +1055,7 @@ namespace mongo {
exhaust = ns;
ss << " exhaust ";
}
+ dqo.finishForOplogReplay(cc);
}
QueryResult *qr = (QueryResult *) result.header();
diff --git a/db/repl.cpp b/db/repl.cpp
index 1d2408c24b4..f0bd02f4cd8 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -1720,7 +1720,7 @@ namespace mongo {
void oldRepl();
void startReplication() {
/* if we are going to be a replica set, we aren't doing other forms of replication. */
- if( !cmdLine.replSet.empty() ) {
+ if( !cmdLine._replSet.empty() ) {
if( replSettings.slave || replSettings.master || replPair ) {
log() << "***" << endl;
log() << "ERROR: can't use --slave or --master replication options with --replSet" << endl;
diff --git a/db/repl/health.cpp b/db/repl/health.cpp
index 1c1cd884751..28d936e50fc 100644
--- a/db/repl/health.cpp
+++ b/db/repl/health.cpp
@@ -185,6 +185,10 @@ namespace mongo {
ScopedConn conn(m->fullName());
auto_ptr<DBClientCursor> c = conn->query(rsoplog, Query().sort("$natural",1), 20, 0, &fields);
+ if( c.get() == 0 ) {
+ ss << "couldn't query " << rsoplog;
+ return;
+ }
static const char *h[] = {"ts","optime", "h","op","ns","rest",0};
ss << "<style type=\"text/css\" media=\"screen\">"
@@ -212,6 +216,10 @@ namespace mongo {
}
else {
auto_ptr<DBClientCursor> c = conn->query(rsoplog, Query().sort("$natural",-1), 20, 0, &fields);
+ if( c.get() == 0 ) {
+ ss << "couldn't query [2] " << rsoplog;
+ return;
+ }
string x;
bo o = c->next();
otEnd = o["ts"]._opTime();
@@ -270,6 +278,21 @@ namespace mongo {
order on all the different web ui's; that is less confusing for the operator. */
map<int,string> mp;
+ string myMinValid;
+ try {
+ readlocktry lk("local.replset.minvalid", 300);
+ if( lk.got() ) {
+ BSONObj mv;
+ if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
+ myMinValid = "minvalid:" + mv["ts"]._opTime().toString();
+ }
+ }
+ else myMinValid = ".";
+ }
+ catch(...) {
+ myMinValid = "exception fetching minvalid";
+ }
+
{
stringstream s;
/* self row */
@@ -283,8 +306,8 @@ namespace mongo {
s << td( _hbmsg );
stringstream q;
q << "/_replSetOplog?" << _self->id();
- s << td( a(q.str(), "", theReplSet->lastOpTimeWritten.toString()) );
- s << td("");
+ s << td( a(q.str(), myMinValid, theReplSet->lastOpTimeWritten.toString()) );
+ s << td(""); // skew
s << _tr();
mp[_self->hbinfo().id()] = s.str();
}
@@ -299,20 +322,6 @@ namespace mongo {
for( map<int,string>::const_iterator i = mp.begin(); i != mp.end(); i++ )
s << i->second;
s << _table();
-
- try {
- readlocktry lk("local.replset.minvalid", 1000);
- if( lk.got() ) {
- BSONObj mv;
- if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
- s << p( str::stream() << "minvalid: " << mv["ts"]._opTime().toString() );
- }
- }
- else s << p(".");
- }
- catch(...) {
- s << p("exception fetching minvalid?");
- }
}
@@ -329,24 +338,36 @@ namespace mongo {
}
void ReplSetImpl::_summarizeStatus(BSONObjBuilder& b) const {
- Member *m =_members.head();
vector<BSONObj> v;
// add self
{
HostAndPort h(getHostName(), cmdLine.port);
- v.push_back(
- BSON( "name" << h.toString() << "self" << true <<
- "errmsg" << _self->lhb() ) );
+
+ BSONObjBuilder bb;
+ bb.append("_id", (int) _self->id());
+ bb.append("name", h.toString());
+ bb.append("health", 1.0);
+ bb.append("state", (int) box.getState().s);
+ string s = _self->lhb();
+ if( !s.empty() )
+ bb.append("errmsg", s);
+ bb.append("self", true);
+ v.push_back(bb.obj());
}
+ Member *m =_members.head();
while( m ) {
BSONObjBuilder bb;
+ bb.append("_id", (int) m->id());
bb.append("name", m->fullName());
bb.append("health", m->hbinfo().health);
+ bb.append("state", (int) m->state().s);
bb.append("uptime", (unsigned) (m->hbinfo().upSince ? (time(0)-m->hbinfo().upSince) : 0));
bb.appendTimeT("lastHeartbeat", m->hbinfo().lastHeartbeat);
- bb.append("errmsg", m->lhb());
+ string s = m->lhb();
+ if( !s.empty() )
+ bb.append("errmsg", s);
v.push_back(bb.obj());
m = m->next();
}
diff --git a/db/repl/heartbeat.cpp b/db/repl/heartbeat.cpp
index c751a21c450..78ce5d12477 100644
--- a/db/repl/heartbeat.cpp
+++ b/db/repl/heartbeat.cpp
@@ -59,13 +59,15 @@ namespace mongo {
errmsg = "incompatible replset protocol version";
return false;
}
- string s = string(cmdObj.getStringField("replSetHeartbeat"))+'/';
- if( !startsWith(cmdLine.replSet, s ) ) {
- errmsg = "repl set names do not match";
- log() << "cmdline: " << cmdLine.replSet << endl;
- log() << "s: " << s << endl;
- result.append("mismatch", true);
- return false;
+ {
+ string s = string(cmdObj.getStringField("replSetHeartbeat"));
+ if( cmdLine.ourSetName() != s ) {
+ errmsg = "repl set names do not match";
+ log() << "cmdline: " << cmdLine._replSet << endl;
+ log() << "s: " << s << endl;
+ result.append("mismatch", true);
+ return false;
+ }
}
result.append("rs", true);
diff --git a/db/repl/manager.cpp b/db/repl/manager.cpp
index e372ec4a6f8..e87068877b8 100644
--- a/db/repl/manager.cpp
+++ b/db/repl/manager.cpp
@@ -48,9 +48,9 @@ namespace mongo {
task::Server("rs Manager"), rs(_rs), busyWithElectSelf(false), _primary(NOPRIMARY)
{
}
-
+
Manager::~Manager() {
- log() << "should never be called?" << rsLog;
+ log() << "ERROR: ~Manager should never be called" << rsLog;
rs->mgr = 0;
assert(false);
}
diff --git a/db/repl/replset_commands.cpp b/db/repl/replset_commands.cpp
index 9c1c1c68d62..196d25cae1f 100644
--- a/db/repl/replset_commands.cpp
+++ b/db/repl/replset_commands.cpp
@@ -40,7 +40,7 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << "Just for testing : do not use.\n";
}
- CmdReplSetTest() : ReplSetCommand("replSetTest", true) { }
+ CmdReplSetTest() : ReplSetCommand("replSetTest") { }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
@@ -57,11 +57,11 @@ namespace mongo {
public:
int rbid;
virtual void help( stringstream &help ) const {
- help << "internal";
+ help << "internal";
+ }
+ CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID") {
+ rbid = (int) curTimeMillis();
}
- CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID", true) {
- rbid = (int) curTimeMillis();
- }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
@@ -69,11 +69,12 @@ namespace mongo {
return true;
}
} cmdReplSetRBID;
- using namespace bson;
+
+ using namespace bson;
int getRBID(DBClientConnection *c) {
- bo info;
- c->simpleCommand("admin", &info, "replSetGetRBID");
- return info["rbid"].numberInt();
+ bo info;
+ c->simpleCommand("admin", &info, "replSetGetRBID");
+ return info["rbid"].numberInt();
}
class CmdReplSetGetStatus : public ReplSetCommand {
@@ -174,7 +175,7 @@ namespace mongo {
help << "\nhttp://www.mongodb.org/display/DOCS/Replica+Set+Commands";
}
- CmdReplSetFreeze() : ReplSetCommand("replSetFreeze", true) { }
+ CmdReplSetFreeze() : ReplSetCommand("replSetFreeze") { }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
@@ -191,7 +192,7 @@ namespace mongo {
help << "http://www.mongodb.org/display/DOCS/Replica+Set+Commands";
}
- CmdReplSetStepDown() : ReplSetCommand("replSetStepDown", true) { }
+ CmdReplSetStepDown() : ReplSetCommand("replSetStepDown") { }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
@@ -235,7 +236,7 @@ namespace mongo {
s << p(t);
if( theReplSet == 0 ) {
- if( cmdLine.replSet.empty() )
+ if( cmdLine._replSet.empty() )
s << p("Not using --replSet");
else {
s << p("Still starting up, or else set is not yet " + a("http://www.mongodb.org/display/DOCS/Replica+Set+Configuration#InitialSetup", "", "initiated")
@@ -266,7 +267,7 @@ namespace mongo {
);
if( theReplSet == 0 ) {
- if( cmdLine.replSet.empty() )
+ if( cmdLine._replSet.empty() )
s << p("Not using --replSet");
else {
s << p("Still starting up, or else set is not yet " + a("http://www.mongodb.org/display/DOCS/Replica+Set+Configuration#InitialSetup", "", "initiated")
diff --git a/db/repl/rs.cpp b/db/repl/rs.cpp
index d16c31eb7e5..ce8d532944e 100644
--- a/db/repl/rs.cpp
+++ b/db/repl/rs.cpp
@@ -155,17 +155,18 @@ namespace mongo {
}
/** @param cfgString <setname>/<seedhost1>,<seedhost2> */
-/*
- ReplSet::ReplSet(string cfgString) : fatal(false) {
-
- }
-*/
void parseReplsetCmdLine(string cfgString, string& setname, vector<HostAndPort>& seeds, set<HostAndPort>& seedSet ) {
const char *p = cfgString.c_str();
const char *slash = strchr(p, '/');
- uassert(13093, "bad --replSet config string format is: <setname>/<seedhost1>,<seedhost2>[,...]", slash != 0 && p != slash);
- setname = string(p, slash-p);
+ if( slash )
+ setname = string(p, slash-p);
+ else
+ setname = p;
+ uassert(13093, "bad --replSet config string format is: <setname>[/<seedhost1>,<seedhost2>,...]", !setname.empty());
+
+ if( slash == 0 )
+ return;
p = slash + 1;
while( 1 ) {
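
parseReplsetCmdLine now accepts a bare set name as well as <setname>/<seedhost1>,<seedhost2>; only the seed list is optional. A standalone sketch of the relaxed parse (HostAndPort handling omitted, illustrative names):

    #include <cassert>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Relaxed --replSet parsing: "<setname>[/<seedhost1>,<seedhost2>,...]"
    static void parseReplSetArg(const std::string& cfg, std::string& setname,
                                std::vector<std::string>& seeds) {
        std::string::size_type slash = cfg.find('/');
        setname = (slash == std::string::npos) ? cfg : cfg.substr(0, slash);
        assert(!setname.empty());            // stands in for uassert(13093, ...)
        if (slash == std::string::npos)
            return;                          // no seed list given
        std::stringstream ss(cfg.substr(slash + 1));
        std::string host;
        while (std::getline(ss, host, ','))
            if (!host.empty()) seeds.push_back(host);
    }

    int main() {
        std::string name;
        std::vector<std::string> seeds;
        parseReplSetArg("rs0/h1:27017,h2:27017", name, seeds);
        std::cout << name << " seeds=" << seeds.size() << "\n";   // rs0 seeds=2
        return 0;
    }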
diff --git a/db/repl/rs.h b/db/repl/rs.h
index 09b766ccf2f..0bdf531fbd9 100644
--- a/db/repl/rs.h
+++ b/db/repl/rs.h
@@ -372,7 +372,7 @@ namespace mongo {
*/
class ReplSetCommand : public Command {
protected:
- ReplSetCommand(const char * s, bool show=false) : Command(s) { }
+ ReplSetCommand(const char * s, bool show=false) : Command(s, show) { }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
virtual bool logTheOp() { return false; }
diff --git a/db/repl/rs_config.cpp b/db/repl/rs_config.cpp
index 52614b89db1..62d1c09c17e 100644
--- a/db/repl/rs_config.cpp
+++ b/db/repl/rs_config.cpp
@@ -147,7 +147,7 @@ namespace mongo {
void ReplSetConfig::check() const {
uassert(13132,
"nonmatching repl set name in _id field; check --replSet command line",
- startsWith(cmdLine.replSet, _id + '/'));
+ _id == cmdLine.ourSetName());
uassert(13308, "replSet bad config version #", version > 0);
uassert(13133, "replSet bad config no members", members.size() >= 1);
uassert(13309, "replSet bad config maximum number of members is 7 (for now)", members.size() <= 7);
@@ -262,9 +262,7 @@ namespace mongo {
}
else {
/* first, make sure other node is configured to be a replset. just to be safe. */
- size_t sl = cmdLine.replSet.find('/');
- assert( sl != string::npos );
- string setname = cmdLine.replSet.substr(0, sl);
+ string setname = cmdLine.ourSetName();
BSONObj cmd = BSON( "replSetHeartbeat" << setname );
int theirVersion;
BSONObj info;
diff --git a/db/repl/rs_initiate.cpp b/db/repl/rs_initiate.cpp
index 19a345bf4d1..cbb444fde58 100644
--- a/db/repl/rs_initiate.cpp
+++ b/db/repl/rs_initiate.cpp
@@ -146,7 +146,7 @@ namespace mongo {
if( ReplSet::startupStatus != ReplSet::EMPTYCONFIG ) {
result.append("startupStatus", ReplSet::startupStatus);
errmsg = "all members and seeds must be reachable to initiate set";
- result.append("info", cmdLine.replSet);
+ result.append("info", cmdLine._replSet);
return false;
}
@@ -159,14 +159,14 @@ namespace mongo {
string name;
vector<HostAndPort> seeds;
set<HostAndPort> seedSet;
- parseReplsetCmdLine(cmdLine.replSet, name, seeds, seedSet); // may throw...
+ parseReplsetCmdLine(cmdLine._replSet, name, seeds, seedSet); // may throw...
bob b;
b.append("_id", name);
bob members;
members.append("0", BSON( "_id" << 0 << "host" << HostAndPort::Me().toString() ));
for( unsigned i = 0; i < seeds.size(); i++ )
- members.append(bob::numStr(i), BSON( "_id" << i << "host" << seeds[i].toString()));
+ members.append(bob::numStr(i+1), BSON( "_id" << i+1 << "host" << seeds[i].toString()));
b.appendArray("members", members.obj());
configObj = b.obj();
log() << "replSet created this configuration for initiation : " << configObj.toString() << rsLog;
diff --git a/db/repl/rs_rollback.cpp b/db/repl/rs_rollback.cpp
index 42814101f76..dc0b89302d1 100644
--- a/db/repl/rs_rollback.cpp
+++ b/db/repl/rs_rollback.cpp
@@ -140,10 +140,10 @@ namespace mongo {
//auto_ptr<DBClientCursor> u = us->query(rsoplog, q, 0, 0, &fields, 0, 0);
- h.rbid = getRBID(them);
+ h.rbid = getRBID(them);
auto_ptr<DBClientCursor> t = them->query(rsoplog, q, 0, 0, &fields, 0, 0);
- if( !t->more() ) throw "remote oplog empty or unreadable";
+ if( t.get() == 0 || !t->more() ) throw "remote oplog empty or unreadable";
BSONObj ourObj = u.current();
OpTime ourTime = ourObj["ts"]._opTime();
diff --git a/db/repl_block.cpp b/db/repl_block.cpp
index 6f68789560b..9cff24f3839 100644
--- a/db/repl_block.cpp
+++ b/db/repl_block.cpp
@@ -24,6 +24,7 @@
#include "../util/background.h"
#include "../util/mongoutils/str.h"
#include "../client/dbclient.h"
+#include "replpair.h"
//#define REPLDEBUG(x) log() << "replBlock: " << x << endl;
#define REPLDEBUG(x)
@@ -154,8 +155,10 @@ namespace mongo {
RARELY {
REPLDEBUG( "looking for : " << op << " w=" << w );
}
- if ( w <= 1 || ! replSettings.master )
+
+ if ( w <= 1 || ! _isMaster() )
return true;
+
w--; // now this is the # of slaves i need
scoped_lock mylk(_mutex);
for ( map<Ident,Info>::iterator i=_slaves.begin(); i!=_slaves.end(); i++){
diff --git a/db/replpair.h b/db/replpair.h
index 4897a0abf7a..19b79bd3eba 100644
--- a/db/replpair.h
+++ b/db/replpair.h
@@ -111,7 +111,7 @@ namespace mongo {
If 'client' is not specified, the current client is used.
*/
- inline bool _isMaster( const char *client = 0 ) {
+ inline bool _isMaster() {
if( replSet ) {
if( theReplSet )
return theReplSet->isPrimary();
@@ -142,7 +142,7 @@ namespace mongo {
return false;
}
inline bool isMaster(const char *client = 0) {
- if( _isMaster(client) )
+ if( _isMaster() )
return true;
if ( !client ) {
Database *database = cc().database();
diff --git a/db/restapi.cpp b/db/restapi.cpp
index e5107e673de..3fd39c24b3f 100644
--- a/db/restapi.cpp
+++ b/db/restapi.cpp
@@ -273,7 +273,7 @@ namespace mongo {
if( *replInfo )
ss << "\nreplInfo: " << replInfo << "\n\n";
if( replSet ) {
- ss << a("", "see replSetGetStatus link top of page") << "--replSet </a>" << cmdLine.replSet << '\n';
+ ss << a("", "see replSetGetStatus link top of page") << "--replSet </a>" << cmdLine._replSet << '\n';
}
if ( replAllDead )
ss << "<b>replication replAllDead=" << replAllDead << "</b>\n";
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index a5f4c7f75f5..eaa35ee9b87 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -35,7 +35,7 @@ doTest = function( signal ) {
var failed = false;
var callGetLastError = function(w, timeout, db) {
- var result = master.getDB(db).runCommand({getlasterror: 1, w: w, wtimeout: timeout});
+ var result = master.getDB(db).getLastErrorObj( w , timeout );
printjson( result );
if(result['ok'] != 1) {
print("FAILURE");
@@ -51,10 +51,14 @@ doTest = function( signal ) {
master.getDB(testDB).foo.insert({n: 3});
callGetLastError(3, 10000, testDB);
+ print("**** TEMP 1a ****")
+
m1 = master.getDB(testDB).foo.findOne({n: 1});
printjson( m1 );
assert( m1['n'] == 1 , "Failed to save to master on multiple inserts");
+ print("**** TEMP 1b ****")
+
var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
assert( s0['n'] == 1 , "Failed to replicate to slave 0 on multiple inserts");
@@ -72,6 +76,7 @@ doTest = function( signal ) {
printjson( m1 );
assert( m1['n'] == 1 , "Failed to save to master");
+
var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
assert( s0['n'] == 1 , "Failed to replicate to slave 0");
@@ -103,4 +108,4 @@ doTest = function( signal ) {
replTest.stopSet( signal );
}
-// doTest( 15 );
+doTest( 15 );
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index 8f5026d8547..f28feedeb3e 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -5,10 +5,17 @@ assert.eq( 1, s.config.shards.count(), "initial server count wrong" );
// create a shard and add a database; if the database is not a duplicate the mongod should be accepted
// as a shard
conn1 = startMongodTest( 29000 );
+
db1 = conn1.getDB( "testDB" );
-db1.foo.save( {a:1} );
+numObjs = 0;
+for (i=0; i<3; i++){
+ db1.foo.save( { a : i } );
+ numObjs++;
+}
db1.getLastError()
-assert( s.admin.runCommand( { addshard: "localhost:29000" } ).ok, "did not accepted non-duplicated shard" );
+
+newShard = "myShard";
+assert( s.admin.runCommand( { addshard: "localhost:29000" , name: newShard } ).ok, "did not accepted non-duplicated shard" );
// a mongod with an existing database name should not be allowed to become a shard
conn2 = startMongodTest( 29001 );
@@ -18,15 +25,32 @@ db2.getLastError()
db3 = conn2.getDB( "testDB" );
db3.foo.save( {a:1} );
db3.getLastError()
+
s.config.databases.find().forEach( printjson )
-assert( ! s.admin.runCommand( { addshard: "localhost:29001" } ).ok, "accepted mongod with duplicate db" );
+rejectedShard = "rejectedShard";
+assert( ! s.admin.runCommand( { addshard: "localhost:29001" , name : rejectedShard } ).ok, "accepted mongod with duplicate db" );
// check that all collections that were local to the mongods are accessible through the mongos
sdb1 = s.getDB( "testDB" );
-assert.eq( 1 , sdb1.foo.count() , "wrong count for database that existed before addshard" );
-sdb2 = s.getDB( "otherDBxx" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count for database that existed before addshard" );
+sdb2 = s.getDB( "otherDB" );
assert.eq( 0 , sdb2.foo.count() , "database of rejected shard appears through mongos" );
+// make sure we can move a DB from the original mongod to a previously existing shard
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary ), newShard , "DB primary is wrong" );
+origShard = s.getNonPrimaries( "testDB" )[0];
+s.adminCommand( { moveprimary : "testDB" , to : origShard } );
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary ), origShard , "DB primary didn't move" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count after moving datbase that existed before addshard" );
+
+// make sure we can shard the original collections
+sdb1.foo.ensureIndex( { a : 1 } ) // can't shard populated collection without an index
+s.adminCommand( { enablesharding : "testDB" } );
+s.adminCommand( { shardcollection : "testDB.foo" , key: { a : 1 } } );
+s.adminCommand( { split : "testDB.foo", middle: { a : Math.floor(numObjs/2) } } );
+assert.eq( 2 , s.config.chunks.count(), "wrong chunk number after splitting collection that existed before" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count after splitting collection that existed before" );
+
stopMongod( 29000 );
stopMongod( 29001 );
s.stop();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 6b3b3bba51e..41a5d68415c 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -42,4 +42,21 @@ assert.eq( numObjs , cursor1.itcount() , "c1" );
assert.eq( numObjs , cursor2.itcount() , "c2" );
assert.eq( numObjs , cursor3.itcount() , "c3" );
+// test timeout
+gc(); gc();
+cur = db.foo.find().batchSize( 2 )
+assert( cur.next() , "T1" )
+assert( cur.next() , "T2" );
+before = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
+printjson( before )
+assert.eq( 1 , before.totalOpen , "TX1" )
+sleep( 6000 )
+assert( cur.next() , "T3" )
+assert( cur.next() , "T4" );
+sleep( 22000 )
+assert.throws( function(){ cur.next(); } , "T5" )
+after = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
+gc(); gc()
+assert.eq( 0 , after.totalOpen , "TX2" )
+
s.stop()
diff --git a/jstests/slowNightly/sharding_cursors1.js b/jstests/slowNightly/sharding_cursors1.js
new file mode 100644
index 00000000000..307e8d7cc5d
--- /dev/null
+++ b/jstests/slowNightly/sharding_cursors1.js
@@ -0,0 +1,71 @@
+s = new ShardingTest( "cursors1" , 2 , 0 , 1 , { chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = "x"
+while (bigString.length < 1024)
+ bigString += bigString;
+assert.eq(bigString.length, 1024, 'len');
+
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+toInsert = ( 1 * 1000 * 1000 );
+for (var i=0; i < toInsert; i++ ){
+ db.foo.insert( { i: i, r: Math.random(), s: bigString } );
+ assert.eq(db.getLastError(), null, 'no error'); //SERVER-1541
+}
+
+inserted = toInsert;
+for (var i=0; i < 10; i++ ){
+ //assert.gte(db.foo.count(), toInsert, 'inserted enough'); //sometimes fails
+ assert.gte(db.foo.count(), toInsert - 100, 'inserted enough');
+ inserted = Math.min(inserted, db.foo.count())
+ sleep (100);
+}
+
+print("\n\n\n **** inserted: " + inserted + '\n\n\n');
+
+/*
+
+var line = 0;
+try {
+ assert.gte(db.foo.find({}, {_id:1}).itcount(), inserted, 'itcount check - no sort - _id only');
+ line = 1;
+ assert.gte(db.foo.find({}, {_id:1}).sort({_id:1}).itcount(), inserted, 'itcount check - _id sort - _id only');
+ line = 2;
+
+ db.foo.ensureIndex({i:1});
+ db.foo.ensureIndex({r:1});
+ db.getLastError();
+ line = 3;
+
+ assert.gte(db.foo.find({}, {i:1}).sort({i:1}).itcount(), inserted, 'itcount check - i sort - i only');
+ line = 4;
+ assert.gte(db.foo.find({}, {_id:1}).sort({i:1}).itcount(), inserted, 'itcount check - i sort - _id only');
+ line = 5;
+
+ assert.gte(db.foo.find({}, {r:1}).sort({r:1}).itcount(), inserted, 'itcount check - r sort - r only');
+ line = 6;
+ assert.gte(db.foo.find({}, {_id:1}).sort({r:1}).itcount(), inserted, 'itcount check - r sort - _id only');
+ line = 7;
+
+ assert.gte(db.foo.find().itcount(), inserted, 'itcount check - no sort - full');
+ line = 8;
+ assert.gte(db.foo.find().sort({_id:1}).itcount(), inserted, 'itcount check - _id sort - full');
+ line = 9;
+ assert.gte(db.foo.find().sort({i:1}).itcount(), inserted, 'itcount check - i sort - full');
+ line = 10;
+ assert.gte(db.foo.find().sort({r:1}).itcount(), inserted, 'itcount check - r sort - full');
+ line = 11;
+} catch (e) {
+ print("***** finished through line " + line + " before exception");
+ throw e;
+}
+
+*/
+
+s.stop();
diff --git a/jstests/slowWeekly/conc_update.js b/jstests/slowWeekly/conc_update.js
new file mode 100644
index 00000000000..299259f8224
--- /dev/null
+++ b/jstests/slowWeekly/conc_update.js
@@ -0,0 +1,46 @@
+db = db.getSisterDB("concurrency")
+db.dropDatabase();
+
+NRECORDS=5*1024*1024 // this needs to be relatively big so that
+ // the update() will take a while, but it could
+ // probably be smaller.
+
+print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)")
+for (i=0; i<(NRECORDS); i++) {
+ db.conc.insert({x:i})
+ if ((i%(1024*1024))==0)
+ print("loaded " + i/(1024*1024) + " mibi-records")
+}
+
+print("making an index (this will take a while)")
+db.conc.ensureIndex({x:1})
+
+var c1=db.conc.count({x:{$lt:NRECORDS}})
+// this is just a flag that the child will toggle when it's done.
+db.concflag.update({}, {inprog:true}, true)
+
+updater=startParallelShell("db=db.getSisterDB('concurrency');\
+ db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
+ print(db.getLastError());\
+ db.concflag.update({},{inprog:false})");
+
+querycount=0;
+decrements=0;
+misses=0
+while (1) {
+ if (db.concflag.findOne().inprog) {
+ c2=db.conc.count({x:{$lt:NRECORDS}})
+ print(c2)
+ querycount++;
+ if (c2<c1)
+ decrements++;
+ else
+ misses++;
+ c1 = c2;
+ } else
+ break;
+ sleep(10);
+}
+print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+
+updater() // wait()
diff --git a/s/chunk.cpp b/s/chunk.cpp
index cee7b0ea6cc..c2e0cc78e7c 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -333,6 +333,7 @@ namespace mongo {
}
bool Chunk::splitIfShould( long dataWritten ){
+ LastError::Disabled d( lastError.get() );
try {
return _splitIfShould( dataWritten );
}
@@ -451,8 +452,12 @@ namespace mongo {
// not using regular count as this is more flexible and supports $min/$max
Query q = Query().minKey(_min).maxKey(_max);
- int n = conn->query(_manager->getns(), q, maxCount, 0, &fields)->itcount();
-
+ int n;
+ {
+ auto_ptr<DBClientCursor> c = conn->query(_manager->getns(), q, maxCount, 0, &fields);
+ assert( c.get() );
+ n = c->itcount();
+ }
conn.done();
return n;
}
@@ -625,6 +630,7 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor = conn->query(temp.getNS(), QUERY("ns" << _ns).sort("lastmod",1), 0, 0, 0, 0,
(DEBUG_BUILD ? 2 : 1000000)); // batch size. Try to induce potential race conditions in debug builds
+ assert( cursor.get() );
while ( cursor->more() ){
BSONObj d = cursor->next();
if ( d["isMaxMarker"].trueValue() ){
@@ -985,6 +991,7 @@ namespace mongo {
ScopedDbConnection conn( temp.modelServer() );
auto_ptr<DBClientCursor> cursor = conn->query(temp.getNS(), QUERY("ns" << _ns).sort("lastmod",1), 1 );
+ assert( cursor.get() );
BSONObj o;
if ( cursor->more() )
o = cursor->next();
diff --git a/s/config.cpp b/s/config.cpp
index e6bb48870c4..50682c67cb2 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -228,6 +228,7 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor = conn->query( ShardNS::collection ,b.obj() );
+ assert( cursor.get() );
while ( cursor->more() ){
BSONObj o = cursor->next();
_collections[o["_id"].String()] = CollectionInfo( this , o );
@@ -488,6 +489,7 @@ namespace mongo {
ScopedDbConnection conn( _primary );
auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
+ assert( c.get() );
while ( c->more() ){
BSONObj o = c->next();
string name = o["_id"].valuestrsafe();
diff --git a/s/cursors.cpp b/s/cursors.cpp
index eca6b78c8a6..6dd7a204116 100644
--- a/s/cursors.cpp
+++ b/s/cursors.cpp
@@ -21,6 +21,7 @@
#include "../client/connpool.h"
#include "../db/queryutil.h"
#include "../db/commands.h"
+#include "../util/background.h"
namespace mongo {
@@ -37,6 +38,12 @@ namespace mongo {
_done = false;
_id = 0;
+
+ if ( q.queryOptions & QueryOption_NoCursorTimeout ){
+ _lastAccessMillis = 0;
+ }
+ else
+ _lastAccessMillis = Listener::getElapsedTimeMillis();
}
ShardedClientCursor::~ShardedClientCursor(){
@@ -53,6 +60,17 @@ namespace mongo {
return _id;
}
+ void ShardedClientCursor::accessed(){
+ if ( _lastAccessMillis > 0 )
+ _lastAccessMillis = Listener::getElapsedTimeMillis();
+ }
+
+ long long ShardedClientCursor::idleTime( long long now ){
+ if ( _lastAccessMillis == 0 )
+ return 0;
+ return now - _lastAccessMillis;
+ }
+
bool ShardedClientCursor::sendNextBatch( Request& r , int ntoreturn ){
uassert( 10191 , "cursor already done" , ! _done );
@@ -101,7 +119,10 @@ namespace mongo {
return hasMore;
}
+
+ // ---- CursorCache -----
+ long long CursorCache::TIMEOUT = 600000;
CursorCache::CursorCache()
:_mutex( "CursorCache" ), _shardedTotal(0){
@@ -125,6 +146,7 @@ namespace mongo {
OCCASIONALLY log() << "Sharded CursorCache missing cursor id: " << id << endl;
return ShardedClientCursorPtr();
}
+ i->second->accessed();
return i->second;
}
@@ -222,7 +244,33 @@ namespace mongo {
result.append( "totalOpen" , (int)(_cursors.size() + _refs.size() ) );
}
+ void CursorCache::doTimeouts(){
+ long long now = Listener::getElapsedTimeMillis();
+ scoped_lock lk( _mutex );
+ for ( MapSharded::iterator i=_cursors.begin(); i!=_cursors.end(); ++i ){
+ long long idleFor = i->second->idleTime( now );
+ if ( idleFor < TIMEOUT ){
+ continue;
+ }
+ log() << "killing old cursor " << i->second->getId() << " idle for: " << idleFor << "ms" << endl; // TODO: make log(1)
+ _cursors.erase( i );
+ }
+ }
+
CursorCache cursorCache;
+
+ class CursorTimeoutThread : public PeriodicBackgroundJob {
+ public:
+ CursorTimeoutThread() : PeriodicBackgroundJob( 4000 ){}
+ virtual string name() { return "cursorTimeout"; }
+ virtual void runLoop(){
+ cursorCache.doTimeouts();
+ }
+ } cursorTimeoutThread;
+
+ void CursorCache::startTimeoutThread(){
+ cursorTimeoutThread.go();
+ }
class CmdCursorInfo : public Command {
public:
@@ -234,6 +282,8 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
bool run(const string&, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
cursorCache.appendInfo( result );
+ if ( jsobj["setTimeout"].isNumber() )
+ CursorCache::TIMEOUT = jsobj["setTimeout"].numberLong();
return true;
}
} cmdCursorInfo;
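
The mongos cursor timeout works by stamping each ShardedClientCursor with the Listener clock on every access (0 means QueryOption_NoCursorTimeout, never reaped) and letting a periodic pass erase anything idle longer than TIMEOUT. A minimal sketch of that bookkeeping, independent of the mongos classes (illustrative names):

    #include <cstdio>
    #include <map>

    // Sketch of the idle-timeout bookkeeping added to CursorCache / ShardedClientCursor.
    struct CursorEntry {
        long long lastAccessMillis;   // 0 => QueryOption_NoCursorTimeout, never reaped
    };

    static const long long kTimeoutMillis = 600000;   // mirrors CursorCache::TIMEOUT (10 minutes)

    static void reapIdle(std::map<long long, CursorEntry>& cursors, long long nowMillis) {
        for (std::map<long long, CursorEntry>::iterator i = cursors.begin(); i != cursors.end(); ) {
            long long idle = i->second.lastAccessMillis ? nowMillis - i->second.lastAccessMillis : 0;
            if (idle >= kTimeoutMillis) {
                std::printf("killing old cursor %lld idle for: %lldms\n", i->first, idle);
                cursors.erase(i++);   // post-increment keeps the iterator valid across the erase
            } else {
                ++i;
            }
        }
    }

    int main() {
        std::map<long long, CursorEntry> cursors;
        cursors[1].lastAccessMillis = 1000;   // long idle, will be reaped
        cursors[2].lastAccessMillis = 0;      // no-timeout cursor, kept
        reapIdle(cursors, 700000);
        std::printf("remaining: %d\n", (int)cursors.size());   // 1
        return 0;
    }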
diff --git a/s/cursors.h b/s/cursors.h
index 7eb8591d947..53c5b647557 100644
--- a/s/cursors.h
+++ b/s/cursors.h
@@ -42,6 +42,10 @@ namespace mongo {
bool sendNextBatch( Request& r ){ return sendNextBatch( r , _ntoreturn ); }
bool sendNextBatch( Request& r , int ntoreturn );
+ void accessed();
+ /** @return idle time in ms */
+ long long idleTime( long long now );
+
protected:
ClusteredCursor * _cursor;
@@ -53,6 +57,8 @@ namespace mongo {
bool _done;
long long _id;
+ long long _lastAccessMillis; // 0 means no timeout
+
};
typedef boost::shared_ptr<ShardedClientCursor> ShardedClientCursorPtr;
@@ -60,6 +66,8 @@ namespace mongo {
class CursorCache {
public:
+ static long long TIMEOUT;
+
typedef map<long long,ShardedClientCursorPtr> MapSharded;
typedef map<long long,string> MapNormal;
@@ -75,8 +83,11 @@ namespace mongo {
void gotKillCursors(Message& m );
void appendInfo( BSONObjBuilder& result );
-
+
long long genId();
+
+ void doTimeouts();
+ void startTimeoutThread();
private:
mutex _mutex;
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index 12b6ee3ae0c..f027f8bc611 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -788,6 +788,7 @@ namespace mongo {
{ // 3. initial bulk clone
state = CLONE;
auto_ptr<DBClientCursor> cursor = conn->query( ns , Query().minKey( min ).maxKey( max ) , /* QueryOption_Exhaust */ 0 );
+ assert( cursor.get() );
while ( cursor->more() ){
BSONObj o = cursor->next();
{
diff --git a/s/d_state.cpp b/s/d_state.cpp
index 26e44a1fcdd..dd2fecef45d 100644
--- a/s/d_state.cpp
+++ b/s/d_state.cpp
@@ -184,6 +184,7 @@ namespace mongo {
}
auto_ptr<DBClientCursor> cursor = conn->query( "config.chunks" , Query(q).sort( "min" ) );
+ assert( cursor.get() );
if ( ! cursor->more() ){
if ( scoped.get() )
scoped->done();
diff --git a/s/dbgrid.vcxproj b/s/dbgrid.vcxproj
index 0a0d9c0563b..83fbf689372 100644
--- a/s/dbgrid.vcxproj
+++ b/s/dbgrid.vcxproj
@@ -185,9 +185,11 @@
<ItemGroup>
<ClCompile Include="..\client\dbclientcursor.cpp" />
<ClCompile Include="..\client\distlock.cpp" />
+ <ClCompile Include="..\db\dbwebserver.cpp" />
<ClCompile Include="..\util\concurrency\thread_pool.cpp" />
<ClCompile Include="..\util\concurrency\vars.cpp" />
<ClCompile Include="..\util\log.cpp" />
+ <ClCompile Include="..\util\miniwebserver.cpp" />
<ClCompile Include="..\util\processinfo.cpp" />
<ClCompile Include="..\util\stringutils.cpp" />
<ClCompile Include="..\util\text.cpp" />
diff --git a/s/dbgrid.vcxproj.filters b/s/dbgrid.vcxproj.filters
index 20fdc802ade..bce75b4c70d 100755
--- a/s/dbgrid.vcxproj.filters
+++ b/s/dbgrid.vcxproj.filters
@@ -281,6 +281,12 @@
<ClCompile Include="..\util\processinfo.cpp">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\db\dbwebserver.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\miniwebserver.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="gridconfig.h">
diff --git a/s/server.cpp b/s/server.cpp
index df6b082d335..09cdb0fbbfc 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -31,6 +31,7 @@
#include "chunk.h"
#include "balance.h"
#include "grid.h"
+#include "cursors.h"
namespace mongo {
@@ -125,12 +126,14 @@ namespace mongo {
void start( const MessageServer::Options& opts ){
balancer.go();
+ cursorCache.startTimeoutThread();
log() << "waiting for connections on port " << cmdLine.port << endl;
//DbGridListener l(port);
//l.listen();
ShardedMessageHandler handler;
MessageServer * server = createServer( opts , &handler );
+ server->setAsTimeTracker();
server->run();
}
diff --git a/s/shard.cpp b/s/shard.cpp
index 7879cc031dc..4ef68c0103c 100644
--- a/s/shard.cpp
+++ b/s/shard.cpp
@@ -33,6 +33,7 @@ namespace mongo {
{
ScopedDbConnection conn( configServer.getPrimary() );
auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , Query() );
+ assert( c.get() );
while ( c->more() ){
all.push_back( c->next().getOwned() );
}
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index fc26581786a..91759fd579d 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -114,10 +114,12 @@ namespace mongo {
}
if ( cursor->sendNextBatch( r , ntoreturn ) ){
- log(6) << "\t cursor finished: " << id << endl;
+ // still more data
+ cursor->accessed();
return;
}
+ // we've exhausted the cursor
cursorCache.remove( id );
}
diff --git a/scripting/engine.cpp b/scripting/engine.cpp
index 7b47a0fb2d1..9e20a3a031c 100644
--- a/scripting/engine.cpp
+++ b/scripting/engine.cpp
@@ -170,6 +170,7 @@ namespace mongo {
static DBClientBase * db = createDirectClient();
auto_ptr<DBClientCursor> c = db->query( coll , Query() );
+ assert( c.get() );
set<string> thisTime;
diff --git a/shell/db.js b/shell/db.js
index 3a82ecb0798..829969540f2 100644
--- a/shell/db.js
+++ b/shell/db.js
@@ -478,6 +478,12 @@ DB.prototype.forceError = function(){
}
DB.prototype.getLastError = function( w , wtimeout ){
+ var res = this.getLastErrorObj( w , wtimeout );
+ if ( ! res.ok )
+ throw "getlasterror failed: " + tojson( res );
+ return res.err;
+}
+DB.prototype.getLastErrorObj = function( w , wtimeout ){
var cmd = { getlasterror : 1 };
if ( w ){
cmd.w = w;
@@ -485,12 +491,7 @@ DB.prototype.getLastError = function( w , wtimeout ){
cmd.wtimeout = wtimeout;
}
var res = this.runCommand( cmd );
- if ( ! res.ok )
- throw "getlasterror failed: " + tojson( res );
- return res.err;
-}
-DB.prototype.getLastErrorObj = function(){
- var res = this.runCommand( { getlasterror : 1 } );
+
if ( ! res.ok )
throw "getlasterror failed: " + tojson( res );
return res;
@@ -533,9 +534,9 @@ DB.prototype.tojson = function(){
DB.prototype.toString = function(){
return this._name;
-}
-
-DB.prototype.isMaster = function(){ return this.runCommand("isMaster");}
+}
+
+DB.prototype.isMaster = function () { return this.runCommand("isMaster"); }
DB.prototype.currentOp = function(){
return db.$cmd.sys.inprog.findOne();
diff --git a/shell/mongo_vstudio.cpp b/shell/mongo_vstudio.cpp
index 14de9667a56..c07a096976a 100644
--- a/shell/mongo_vstudio.cpp
+++ b/shell/mongo_vstudio.cpp
@@ -659,10 +659,12 @@ const char * jsconcatcode =
"print(\"\\trs.addArb(hostportstr) add a new member which is arbiterOnly:true\");\n"
"print(\"\\trs.stepDown() step down as primary (momentarily)\");\n"
"print(\"\\trs.conf() return configuration from local.system.replset\");\n"
+ "print(\"\\trs.slaveOk() shorthand for db.getMongo().setSlaveOk()\");\n"
"print();\n"
"print(\"\\tdb.isMaster() check who is primary\");\n"
"print();\n"
"print(\"\\tsee also http://<mongod_host>:28017/_replSet for additional diagnostic info\");}\n"
+ "rs.slaveOk = function () { return db.getMongo().setSlaveOk(); }\n"
"rs.status = function () { return db._adminCommand(\"replSetGetStatus\"); }\n"
"rs.isMaster = function () { return db.isMaster(); }\n"
"rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }\n"
@@ -990,7 +992,7 @@ const char * jsconcatcode =
"return this._name;}\n"
"DB.prototype.toString = function(){\n"
"return this._name;}\n"
- "DB.prototype.isMaster = function(){ return this.runCommand(\"isMaster\");}\n"
+ "DB.prototype.isMaster = function () { return this.runCommand(\"isMaster\"); }\n"
"DB.prototype.currentOp = function(){\n"
"return db.$cmd.sys.inprog.findOne();}\n"
"DB.prototype.currentOP = DB.prototype.currentOp;\n"
diff --git a/shell/msvc/mongo.vcxproj b/shell/msvc/mongo.vcxproj
index ac69f2d9dd8..b158b9ecd48 100644
--- a/shell/msvc/mongo.vcxproj
+++ b/shell/msvc/mongo.vcxproj
@@ -116,6 +116,7 @@
<ClCompile Include="..\..\s\d_util.cpp" />
<ClCompile Include="..\..\s\shardconnection.cpp" />
<ClCompile Include="..\..\util\background.cpp" />
+ <ClCompile Include="..\..\util\log.cpp" />
<ClCompile Include="..\..\util\mmap.cpp" />
<ClCompile Include="..\..\util\password.cpp" />
<ClCompile Include="..\..\util\text.cpp" />
diff --git a/shell/msvc/mongo.vcxproj.filters b/shell/msvc/mongo.vcxproj.filters
index 85309db6b79..426a8b06332 100644
--- a/shell/msvc/mongo.vcxproj.filters
+++ b/shell/msvc/mongo.vcxproj.filters
@@ -216,6 +216,9 @@
<ClCompile Include="..\mongo-server.cpp">
<Filter>shell\generated_from_js</Filter>
</ClCompile>
+ <ClCompile Include="..\..\util\log.cpp">
+ <Filter>shared source files</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<None Include="..\..\SConstruct" />
diff --git a/shell/servers.js b/shell/servers.js
index 08e8bfd6126..51f3d8af3c6 100644
--- a/shell/servers.js
+++ b/shell/servers.js
@@ -1101,6 +1101,7 @@ ReplSetTest.prototype.callIsMaster = function() {
this.nodeIds[master] = i;
}
else {
+ this.nodes[i].setSlaveOk();
this.liveNodes.slaves.push(this.nodes[i]);
this.nodeIds[this.nodes[i]] = i;
}
diff --git a/shell/utils.js b/shell/utils.js
index fd0e6281a3c..65fb52ed073 100644
--- a/shell/utils.js
+++ b/shell/utils.js
@@ -1046,22 +1046,24 @@ Geo.distance = function( a , b ){
Math.pow( bx - ax , 2 ) );
}
-rs = function () { return "try rs.help()"; }
-
-rs.help = function () {
- print("\trs.status() { replSetGetStatus : 1 } checks repl set status");
- print("\trs.initiate() { replSetInitiate : null } initiates set with default settings");
- print("\trs.initiate(cfg) { replSetInitiate : cfg } initiates set with configuration cfg");
- print("\trs.add(hostportstr) add a new member to the set with default attributes");
- print("\trs.add(membercfgobj) add a new member to the set with extra attributes");
- print("\trs.addArb(hostportstr) add a new member which is arbiterOnly:true");
- print("\trs.stepDown() step down as primary (momentarily)");
- print("\trs.conf() return configuration from local.system.replset");
- print();
- print("\tdb.isMaster() check who is primary");
- print();
- print("\tsee also http://<mongod_host>:28017/_replSet for additional diagnostic info");
-}
+rs = function () { return "try rs.help()"; }
+
+rs.help = function () {
+ print("\trs.status() { replSetGetStatus : 1 } checks repl set status");
+ print("\trs.initiate() { replSetInitiate : null } initiates set with default settings");
+ print("\trs.initiate(cfg) { replSetInitiate : cfg } initiates set with configuration cfg");
+ print("\trs.add(hostportstr) add a new member to the set with default attributes");
+ print("\trs.add(membercfgobj) add a new member to the set with extra attributes");
+ print("\trs.addArb(hostportstr) add a new member which is arbiterOnly:true");
+ print("\trs.stepDown() step down as primary (momentarily)");
+ print("\trs.conf() return configuration from local.system.replset");
+ print("\trs.slaveOk() shorthand for db.getMongo().setSlaveOk()");
+ print();
+ print("\tdb.isMaster() check who is primary");
+ print();
+ print("\tsee also http://<mongod_host>:28017/_replSet for additional diagnostic info");
+}
+rs.slaveOk = function () { return db.getMongo().setSlaveOk(); }
rs.status = function () { return db._adminCommand("replSetGetStatus"); }
rs.isMaster = function () { return db.isMaster(); }
rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }
diff --git a/util/background.cpp b/util/background.cpp
index df2162ae3e2..a6d8290d616 100644
--- a/util/background.cpp
+++ b/util/background.cpp
@@ -96,5 +96,24 @@ namespace mongo {
}
}
}
+
+ void PeriodicBackgroundJob::run(){
+ // sleep before the first runLoop() so inShutdown is obeyed nicely
+ sleepmillis( _millis );
+
+ while ( ! inShutdown() ){
+ try {
+ runLoop();
+ }
+ catch ( std::exception& e ){
+ log( LL_ERROR ) << "PeriodicBackgroundJob [" << name() << "] error: " << e.what() << endl;
+ }
+ catch ( ... ){
+ log( LL_ERROR ) << "PeriodicBackgroundJob [" << name() << "] unknown error" << endl;
+ }
+
+ sleepmillis( _millis );
+ }
+ }
} // namespace mongo
diff --git a/util/background.h b/util/background.h
index 43f3ceb5ede..ee59455e069 100644
--- a/util/background.h
+++ b/util/background.h
@@ -91,4 +91,23 @@ namespace mongo {
volatile State state;
};
+ class PeriodicBackgroundJob : public BackgroundJob {
+ public:
+ PeriodicBackgroundJob( int millisToSleep )
+ : _millis( millisToSleep ){
+ }
+
+ virtual ~PeriodicBackgroundJob(){}
+
+ /** this gets called every millisToSleep ms */
+ virtual void runLoop() = 0;
+
+ virtual void run();
+
+
+ private:
+ int _millis;
+
+ };
+
} // namespace mongo
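
PeriodicBackgroundJob is the base class behind the CursorTimeoutThread added earlier in this patch: subclasses implement runLoop(), and run() in util/background.cpp calls it every _millis milliseconds until shutdown, logging any exception. A hedged sketch of a subclass (HeartbeatLogger is illustrative, not part of the patch):

    #include <iostream>
    #include "util/background.h"   // PeriodicBackgroundJob / BackgroundJob, as in this tree

    // Illustrative subclass: wakes every 4 seconds, the same cadence CursorTimeoutThread uses.
    class HeartbeatLogger : public mongo::PeriodicBackgroundJob {
    public:
        HeartbeatLogger() : PeriodicBackgroundJob( 4000 ) {}
        virtual std::string name() { return "heartbeatLogger"; }
        virtual void runLoop() {
            // anything thrown here is caught and logged by PeriodicBackgroundJob::run()
            std::cout << "still alive" << std::endl;
        }
    } heartbeatLogger;

    // during startup, in the spirit of cursorCache.startTimeoutThread():
    //   heartbeatLogger.go();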
diff --git a/util/concurrency/task.cpp b/util/concurrency/task.cpp
index 7e410c81174..6102666654e 100644
--- a/util/concurrency/task.cpp
+++ b/util/concurrency/task.cpp
@@ -120,13 +120,16 @@ namespace mongo {
rq = false;
while( 1 ) {
lam f;
- {
+ try {
boost::mutex::scoped_lock lk(m);
while( d.empty() )
c.wait(lk);
f = d.front();
d.pop_front();
}
+ catch(...) {
+ log() << "ERROR exception in Server:doWork?" << endl;
+ }
try {
f();
if( rq ) {
@@ -135,6 +138,8 @@ namespace mongo {
}
} catch(std::exception& e) {
log() << "Server::doWork() exception " << e.what() << endl;
+ } catch(...) {
+ log() << "Server::doWork() unknown exception!" << endl;
}
}
}
diff --git a/util/message.cpp b/util/message.cpp
index caed47037da..d7c13dc7af0 100644
--- a/util/message.cpp
+++ b/util/message.cpp
@@ -168,7 +168,7 @@ namespace mongo {
const int ret = select(maxfd+1, fds, NULL, NULL, &maxSelectTime);
if (ret == 0){
- _elapsedTime += maxSelectTime.tv_usec / 1000;
+ _elapsedTime += ( 10000 - maxSelectTime.tv_usec ) / 1000;
continue;
}
_elapsedTime += ret; // assume 1ms to grab connection. very rough
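
The listener-clock fix above matters because select() on Linux rewrites the timeval with the time remaining; after a full 10ms timeout that remainder is 0, so the old code added nothing and the elapsed-time clock stalled, while the new code adds the consumed part of the slice. A small worked example of the two computations (assumes the 10ms select timeout used by this listener):

    #include <cstdio>

    // maxSelectTime starts each loop at 10000 usec (10ms); on Linux, select()
    // overwrites it with the time *remaining* when it returns.
    int main() {
        long remaining_usec = 0;   // select() returned 0: the full timeout elapsed

        long old_increment = remaining_usec / 1000;             // old code: adds 0ms, clock stalls
        long new_increment = (10000 - remaining_usec) / 1000;   // fixed code: adds the 10ms consumed

        std::printf("old: +%ldms  new: +%ldms\n", old_increment, new_increment);
        return 0;
    }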
diff --git a/util/message_server.h b/util/message_server.h
index 068b91d1ffe..9d6a8f25d7c 100644
--- a/util/message_server.h
+++ b/util/message_server.h
@@ -44,6 +44,7 @@ namespace mongo {
virtual ~MessageServer(){}
virtual void run() = 0;
+ virtual void setAsTimeTracker() = 0;
};
// TODO use a factory here to decide between port and asio variations
diff --git a/util/message_server_port.cpp b/util/message_server_port.cpp
index 6d795c8542b..9649e454bc9 100644
--- a/util/message_server_port.cpp
+++ b/util/message_server_port.cpp
@@ -107,6 +107,10 @@ namespace mongo {
}
}
+ virtual void setAsTimeTracker(){
+ Listener::setAsTimeTracker();
+ }
+
void run(){
initAndListen();
}