summaryrefslogtreecommitdiff
path: root/src/mongo/dbtests
diff options
context:
space:
mode:
Diffstat (limited to 'src/mongo/dbtests')
-rw-r--r--src/mongo/dbtests/background_job_test.cpp109
-rw-r--r--src/mongo/dbtests/balancer_policy_tests.cpp203
-rw-r--r--src/mongo/dbtests/basictests.cpp695
-rw-r--r--src/mongo/dbtests/btreetests.cpp59
-rw-r--r--src/mongo/dbtests/btreetests.inl1713
-rw-r--r--src/mongo/dbtests/clienttests.cpp197
-rw-r--r--src/mongo/dbtests/commandtests.cpp98
-rw-r--r--src/mongo/dbtests/counttests.cpp142
-rw-r--r--src/mongo/dbtests/cursortests.cpp305
-rw-r--r--src/mongo/dbtests/d_chunk_manager_tests.cpp467
-rw-r--r--src/mongo/dbtests/dbtests.cpp29
-rw-r--r--src/mongo/dbtests/dbtests.h25
-rw-r--r--src/mongo/dbtests/directclienttests.cpp103
-rw-r--r--src/mongo/dbtests/framework.cpp446
-rw-r--r--src/mongo/dbtests/framework.h199
-rw-r--r--src/mongo/dbtests/histogram_test.cpp94
-rw-r--r--src/mongo/dbtests/jsobjtests.cpp2208
-rw-r--r--src/mongo/dbtests/jsontests.cpp1185
-rw-r--r--src/mongo/dbtests/jstests.cpp1052
-rw-r--r--src/mongo/dbtests/macrotests.cpp47
-rw-r--r--src/mongo/dbtests/matchertests.cpp163
-rw-r--r--src/mongo/dbtests/mmaptests.cpp219
-rw-r--r--src/mongo/dbtests/namespacetests.cpp1244
-rw-r--r--src/mongo/dbtests/pdfiletests.cpp407
-rw-r--r--src/mongo/dbtests/perf/btreeperf.cpp442
-rw-r--r--src/mongo/dbtests/perf/perftest.cpp761
-rw-r--r--src/mongo/dbtests/perftests.cpp1029
-rw-r--r--src/mongo/dbtests/queryoptimizercursortests.cpp2521
-rw-r--r--src/mongo/dbtests/queryoptimizertests.cpp1063
-rw-r--r--src/mongo/dbtests/querytests.cpp1408
-rw-r--r--src/mongo/dbtests/queryutiltests.cpp989
-rw-r--r--src/mongo/dbtests/replsettests.cpp227
-rw-r--r--src/mongo/dbtests/repltests.cpp1228
-rw-r--r--src/mongo/dbtests/sharding.cpp56
-rw-r--r--src/mongo/dbtests/socktests.cpp48
-rw-r--r--src/mongo/dbtests/spin_lock_test.cpp114
-rwxr-xr-xsrc/mongo/dbtests/test.sln26
-rw-r--r--src/mongo/dbtests/test.vcxproj776
-rwxr-xr-xsrc/mongo/dbtests/test.vcxproj.filters939
-rw-r--r--src/mongo/dbtests/threadedtests.cpp649
-rw-r--r--src/mongo/dbtests/updatetests.cpp877
41 files changed, 24562 insertions, 0 deletions
diff --git a/src/mongo/dbtests/background_job_test.cpp b/src/mongo/dbtests/background_job_test.cpp
new file mode 100644
index 00000000000..f2bf7d86244
--- /dev/null
+++ b/src/mongo/dbtests/background_job_test.cpp
@@ -0,0 +1,109 @@
+// @file background_job_test.cpp
+
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../pch.h"
+#include <boost/thread/thread.hpp>
+
+#include "dbtests.h"
+#include "../util/time_support.h"
+#include "../util/background.h"
+
+namespace BackgroundJobTests {
+
+ // a global variable that can be accessed independent of the IncTester object below
+ // IncTester keeps it up-to-date
+ int GLOBAL_val;
+
+ class IncTester : public mongo::BackgroundJob {
+ public:
+ explicit IncTester( long long millis , bool selfDelete = false )
+ : BackgroundJob(selfDelete), _val(0), _millis(millis) { GLOBAL_val = 0; }
+
+ void waitAndInc( long long millis ) {
+ if ( millis )
+ mongo::sleepmillis( millis );
+ ++_val;
+ ++GLOBAL_val;
+ }
+
+ int getVal() { return _val; }
+
+ /* --- BackgroundJob virtuals --- */
+
+ string name() const { return "IncTester"; }
+
+ void run() { waitAndInc( _millis ); }
+
+ private:
+ int _val;
+ long long _millis;
+ };
+
+
+ class NormalCase {
+ public:
+ void run() {
+ IncTester tester( 0 /* inc without wait */ );
+ tester.go();
+ ASSERT( tester.wait() );
+ ASSERT_EQUALS( tester.getVal() , 1 );
+ }
+ };
+
+ class TimeOutCase {
+ public:
+ void run() {
+ IncTester tester( 1000 /* wait 1sec before inc-ing */ );
+ tester.go();
+ ASSERT( ! tester.wait( 100 /* ms */ ) ); // should time out
+ ASSERT_EQUALS( tester.getVal() , 0 );
+
+ // if we wait longer than the IncTester, we should see the increment
+ ASSERT( tester.wait( 1500 /* ms */ ) ); // should not time out
+ ASSERT_EQUALS( tester.getVal() , 1 );
+ }
+ };
+
+ class SelfDeletingCase {
+ public:
+ void run() {
+ BackgroundJob* j = new IncTester( 0 /* inc without wait */ , true /* self delete */ );
+ j->go();
+
+
+ // the background thread should have continued running and this test should pass the
+ // heap-checker as well
+ mongo::sleepmillis( 1000 );
+ ASSERT_EQUALS( GLOBAL_val, 1 );
+ }
+ };
+
+
+ class BackgroundJobSuite : public Suite {
+ public:
+ BackgroundJobSuite() : Suite( "background_job" ) {}
+
+ void setupTests() {
+ add< NormalCase >();
+ add< TimeOutCase >();
+ add< SelfDeletingCase >();
+ }
+
+ } backgroundJobSuite;
+
+} // namespace BackgroundJobTests
diff --git a/src/mongo/dbtests/balancer_policy_tests.cpp b/src/mongo/dbtests/balancer_policy_tests.cpp
new file mode 100644
index 00000000000..6f7c4a5dcd3
--- /dev/null
+++ b/src/mongo/dbtests/balancer_policy_tests.cpp
@@ -0,0 +1,203 @@
+// @file balancer_policy_tests.cpp
+
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "dbtests.h"
+
+// TODO SERVER-1822
+//#include "../s/config.h" // for ShardFields
+//#include "../s/balancer_policy.h"
+
+namespace BalancerPolicyTests {
+
+//
+// TODO SERVER-1822
+//
+#if 0
+
+    typedef mongo::ShardFields sf; // fields from 'shards' collection
+ typedef mongo::LimitsFields lf; // fields from the balancer's limits map
+
+ class SizeMaxedShardTest {
+ public:
+ void run() {
+ BSONObj shard0 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) );
+ ASSERT( ! BalancerPolicy::isSizeMaxed( shard0 ) );
+
+ BSONObj shard1 = BSON( sf::maxSize(100LL) << lf::currSize(80LL) );
+ ASSERT( ! BalancerPolicy::isSizeMaxed( shard1 ) );
+
+ BSONObj shard2 = BSON( sf::maxSize(100LL) << lf::currSize(110LL) );
+ ASSERT( BalancerPolicy::isSizeMaxed( shard2 ) );
+
+ BSONObj empty;
+ ASSERT( ! BalancerPolicy::isSizeMaxed( empty ) );
+ }
+ };
+
+ class DrainingShardTest {
+ public:
+ void run() {
+ BSONObj shard0 = BSON( sf::draining(true) );
+ ASSERT( BalancerPolicy::isDraining( shard0 ) );
+
+ BSONObj shard1 = BSON( sf::draining(false) );
+ ASSERT( ! BalancerPolicy::isDraining( shard1 ) );
+
+ BSONObj empty;
+ ASSERT( ! BalancerPolicy::isDraining( empty ) );
+ }
+ };
+
+ class BalanceNormalTest {
+ public:
+ void run() {
+ // 2 chunks and 0 chunk shards
+ BalancerPolicy::ShardToChunksMap chunkMap;
+ vector<BSONObj> chunks;
+ chunks.push_back(BSON( "min" << BSON( "x" << BSON( "$minKey"<<1) ) <<
+ "max" << BSON( "x" << 49 )));
+ chunks.push_back(BSON( "min" << BSON( "x" << 49 ) <<
+ "max" << BSON( "x" << BSON( "$maxkey"<<1 ))));
+ chunkMap["shard0"] = chunks;
+ chunks.clear();
+ chunkMap["shard1"] = chunks;
+
+ // no limits
+ BalancerPolicy::ShardToLimitsMap limitsMap;
+ BSONObj limits0 = BSON( sf::maxSize(0LL) << lf::currSize(2LL) << sf::draining(false) << lf::hasOpsQueued(false) );
+ BSONObj limits1 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) << sf::draining(false) << lf::hasOpsQueued(false) );
+ limitsMap["shard0"] = limits0;
+ limitsMap["shard1"] = limits1;
+
+ BalancerPolicy::ChunkInfo* c = NULL;
+ c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 1 );
+ ASSERT( c );
+ }
+ };
+
+ class BalanceDrainingTest {
+ public:
+ void run() {
+ // one normal, one draining
+ // 2 chunks and 0 chunk shards
+ BalancerPolicy::ShardToChunksMap chunkMap;
+ vector<BSONObj> chunks;
+ chunks.push_back(BSON( "min" << BSON( "x" << BSON( "$minKey"<<1) ) <<
+ "max" << BSON( "x" << 49 )));
+ chunkMap["shard0"] = chunks;
+ chunks.clear();
+ chunks.push_back(BSON( "min" << BSON( "x" << 49 ) <<
+ "max" << BSON( "x" << BSON( "$maxkey"<<1 ))));
+ chunkMap["shard1"] = chunks;
+
+ // shard0 is draining
+ BalancerPolicy::ShardToLimitsMap limitsMap;
+ BSONObj limits0 = BSON( sf::maxSize(0LL) << lf::currSize(2LL) << sf::draining(true) );
+ BSONObj limits1 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) << sf::draining(false) );
+ limitsMap["shard0"] = limits0;
+ limitsMap["shard1"] = limits1;
+
+ BalancerPolicy::ChunkInfo* c = NULL;
+ c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 0 );
+ ASSERT( c );
+ ASSERT_EQUALS( c->to , "shard1" );
+ ASSERT_EQUALS( c->from , "shard0" );
+ ASSERT( ! c->chunk.isEmpty() );
+ }
+ };
+
+ class BalanceEndedDrainingTest {
+ public:
+ void run() {
+ // 2 chunks and 0 chunk (drain completed) shards
+ BalancerPolicy::ShardToChunksMap chunkMap;
+ vector<BSONObj> chunks;
+ chunks.push_back(BSON( "min" << BSON( "x" << BSON( "$minKey"<<1) ) <<
+ "max" << BSON( "x" << 49 )));
+ chunks.push_back(BSON( "min" << BSON( "x" << 49 ) <<
+ "max" << BSON( "x" << BSON( "$maxkey"<<1 ))));
+ chunkMap["shard0"] = chunks;
+ chunks.clear();
+ chunkMap["shard1"] = chunks;
+
+ // no limits
+ BalancerPolicy::ShardToLimitsMap limitsMap;
+ BSONObj limits0 = BSON( sf::maxSize(0LL) << lf::currSize(2LL) << sf::draining(false) );
+ BSONObj limits1 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) << sf::draining(true) );
+ limitsMap["shard0"] = limits0;
+ limitsMap["shard1"] = limits1;
+
+ BalancerPolicy::ChunkInfo* c = NULL;
+ c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 0 );
+ ASSERT( ! c );
+ }
+ };
+
+ class BalanceImpasseTest {
+ public:
+ void run() {
+ // one maxed out, one draining
+ // 2 chunks and 0 chunk shards
+ BalancerPolicy::ShardToChunksMap chunkMap;
+ vector<BSONObj> chunks;
+ chunks.push_back(BSON( "min" << BSON( "x" << BSON( "$minKey"<<1) ) <<
+ "max" << BSON( "x" << 49 )));
+ chunkMap["shard0"] = chunks;
+ chunks.clear();
+ chunks.push_back(BSON( "min" << BSON( "x" << 49 ) <<
+ "max" << BSON( "x" << BSON( "$maxkey"<<1 ))));
+ chunkMap["shard1"] = chunks;
+
+ // shard0 is draining, shard1 is maxed out, shard2 has writebacks pending
+ BalancerPolicy::ShardToLimitsMap limitsMap;
+ BSONObj limits0 = BSON( sf::maxSize(0LL) << lf::currSize(2LL) << sf::draining(true) );
+ BSONObj limits1 = BSON( sf::maxSize(1LL) << lf::currSize(1LL) << sf::draining(false) );
+ BSONObj limits2 = BSON( sf::maxSize(0LL) << lf::currSize(1LL) << lf::hasOpsQueued(true) );
+ limitsMap["shard0"] = limits0;
+ limitsMap["shard1"] = limits1;
+ limitsMap["shard2"] = limits2;
+
+ BalancerPolicy::ChunkInfo* c = NULL;
+ c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 0 );
+ ASSERT( ! c );
+ }
+ };
+
+//
+// TODO SERVER-1822
+//
+#endif // #if 0
+
+ class All : public Suite {
+ public:
+ All() : Suite( "balancer_policy" ) {
+ }
+
+ void setupTests() {
+ // TODO SERVER-1822
+ // add< SizeMaxedShardTest >();
+ // add< DrainingShardTest >();
+ // add< BalanceNormalTest >();
+ // add< BalanceDrainingTest >();
+ // add< BalanceEndedDrainingTest >();
+ // add< BalanceImpasseTest >();
+ }
+ } allTests;
+
+} // namespace BalancerPolicyTests
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
new file mode 100644
index 00000000000..46a7dbc22bd
--- /dev/null
+++ b/src/mongo/dbtests/basictests.cpp
@@ -0,0 +1,695 @@
+// basictests.cpp : basic unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "dbtests.h"
+#include "../util/base64.h"
+#include "../util/array.h"
+#include "../util/text.h"
+#include "../util/queue.h"
+#include "../util/paths.h"
+#include "../util/stringutils.h"
+#include "../util/compress.h"
+#include "../db/db.h"
+
+namespace BasicTests {
+
+ class Rarely {
+ public:
+ void run() {
+ int first = 0;
+ int second = 0;
+ int third = 0;
+ for( int i = 0; i < 128; ++i ) {
+ incRarely( first );
+ incRarely2( second );
+ ONCE ++third;
+ }
+ ASSERT_EQUALS( 1, first );
+ ASSERT_EQUALS( 1, second );
+ ASSERT_EQUALS( 1, third );
+ }
+ private:
+ void incRarely( int &c ) {
+ RARELY ++c;
+ }
+ void incRarely2( int &c ) {
+ RARELY ++c;
+ }
+ };
+
+ class Base64Tests {
+ public:
+
+ void roundTrip( string s ) {
+ ASSERT_EQUALS( s , base64::decode( base64::encode( s ) ) );
+ }
+
+ void roundTrip( const unsigned char * _data , int len ) {
+ const char *data = (const char *) _data;
+ string s = base64::encode( data , len );
+ string out = base64::decode( s );
+ ASSERT_EQUALS( out.size() , static_cast<size_t>(len) );
+ bool broke = false;
+ for ( int i=0; i<len; i++ ) {
+ if ( data[i] != out[i] )
+ broke = true;
+ }
+ if ( ! broke )
+ return;
+
+ cout << s << endl;
+ for ( int i=0; i<len; i++ )
+ cout << hex << ( data[i] & 0xFF ) << dec << " ";
+ cout << endl;
+ for ( int i=0; i<len; i++ )
+ cout << hex << ( out[i] & 0xFF ) << dec << " ";
+ cout << endl;
+
+ ASSERT(0);
+ }
+
+ void run() {
+
+ ASSERT_EQUALS( "ZWxp" , base64::encode( "eli" , 3 ) );
+ ASSERT_EQUALS( "ZWxpb3Rz" , base64::encode( "eliots" , 6 ) );
+ ASSERT_EQUALS( "ZWxpb3Rz" , base64::encode( "eliots" ) );
+
+ ASSERT_EQUALS( "ZQ==" , base64::encode( "e" , 1 ) );
+ ASSERT_EQUALS( "ZWw=" , base64::encode( "el" , 2 ) );
+
+ roundTrip( "e" );
+ roundTrip( "el" );
+ roundTrip( "eli" );
+ roundTrip( "elio" );
+ roundTrip( "eliot" );
+ roundTrip( "eliots" );
+ roundTrip( "eliotsz" );
+
+ unsigned char z[] = { 0x1 , 0x2 , 0x3 , 0x4 };
+ roundTrip( z , 4 );
+
+ unsigned char y[] = {
+ 0x01, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
+ 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
+ 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
+ 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
+ 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF
+ };
+ roundTrip( y , 4 );
+ roundTrip( y , 40 );
+ }
+ };
+
+ namespace stringbuildertests {
+#define SBTGB(x) ss << (x); sb << (x);
+
+ class Base {
+ virtual void pop() = 0;
+
+ public:
+ Base() {}
+ virtual ~Base() {}
+
+ void run() {
+ pop();
+ ASSERT_EQUALS( ss.str() , sb.str() );
+ }
+
+ stringstream ss;
+ StringBuilder sb;
+ };
+
+ class simple1 : public Base {
+ void pop() {
+ SBTGB(1);
+ SBTGB("yo");
+ SBTGB(2);
+ }
+ };
+
+ class simple2 : public Base {
+ void pop() {
+ SBTGB(1);
+ SBTGB("yo");
+ SBTGB(2);
+ SBTGB( 12123123123LL );
+ SBTGB( "xxx" );
+ SBTGB( 5.4 );
+ SBTGB( 5.4312 );
+ SBTGB( "yyy" );
+ SBTGB( (short)5 );
+ SBTGB( (short)(1231231231231LL) );
+ }
+ };
+
+ class reset1 {
+ public:
+ void run() {
+ StringBuilder sb;
+ sb << "1" << "abc" << "5.17";
+ ASSERT_EQUALS( "1abc5.17" , sb.str() );
+ ASSERT_EQUALS( "1abc5.17" , sb.str() );
+ sb.reset();
+ ASSERT_EQUALS( "" , sb.str() );
+ sb << "999";
+ ASSERT_EQUALS( "999" , sb.str() );
+ }
+ };
+
+ class reset2 {
+ public:
+ void run() {
+ StringBuilder sb;
+ sb << "1" << "abc" << "5.17";
+ ASSERT_EQUALS( "1abc5.17" , sb.str() );
+ ASSERT_EQUALS( "1abc5.17" , sb.str() );
+ sb.reset(1);
+ ASSERT_EQUALS( "" , sb.str() );
+ sb << "999";
+ ASSERT_EQUALS( "999" , sb.str() );
+ }
+ };
+
+ }
+
+ class sleeptest {
+ public:
+
+ void run() {
+ Timer t;
+ int matches = 0;
+ for( int p = 0; p < 3; p++ ) {
+ sleepsecs( 1 );
+ int sec = (t.millis() + 2)/1000;
+ if( sec == 1 )
+ matches++;
+ else
+ log() << "temp millis: " << t.millis() << endl;
+ ASSERT( sec >= 0 && sec <= 2 );
+ t.reset();
+ }
+ if ( matches < 2 )
+ log() << "matches:" << matches << endl;
+ ASSERT( matches >= 2 );
+
+ sleepmicros( 1527123 );
+ ASSERT( t.micros() > 1000000 );
+ ASSERT( t.micros() < 2000000 );
+
+ t.reset();
+ sleepmillis( 1727 );
+ ASSERT( t.millis() >= 1000 );
+ ASSERT( t.millis() <= 2500 );
+
+ {
+ int total = 1200;
+ int ms = 2;
+ t.reset();
+ for ( int i=0; i<(total/ms); i++ ) {
+ sleepmillis( ms );
+ }
+ {
+ int x = t.millis();
+ if ( x < 1000 || x > 2500 ) {
+ cout << "sleeptest finds sleep accuracy to be not great. x: " << x << endl;
+ ASSERT( x >= 1000 );
+ ASSERT( x <= 20000 );
+ }
+ }
+ }
+
+#ifdef __linux__
+ {
+ int total = 1200;
+ int micros = 100;
+ t.reset();
+ int numSleeps = 1000*(total/micros);
+ for ( int i=0; i<numSleeps; i++ ) {
+ sleepmicros( micros );
+ }
+ {
+ int y = t.millis();
+ if ( y < 1000 || y > 2500 ) {
+ cout << "sleeptest y: " << y << endl;
+ ASSERT( y >= 1000 );
+ /* ASSERT( y <= 100000 ); */
+ }
+ }
+ }
+#endif
+
+ }
+
+ };
+
+ class AssertTests {
+ public:
+
+ int x;
+
+ AssertTests() {
+ x = 0;
+ }
+
+ string foo() {
+ x++;
+ return "";
+ }
+ void run() {
+ uassert( -1 , foo() , 1 );
+ if( x != 0 ) {
+ ASSERT_EQUALS( 0 , x );
+ }
+ try {
+ uassert( -1 , foo() , 0 );
+ }
+ catch ( ... ) {}
+ ASSERT_EQUALS( 1 , x );
+ }
+ };
+
+ namespace ArrayTests {
+ class basic1 {
+ public:
+ void run() {
+ FastArray<int> a(100);
+ a.push_back( 5 );
+ a.push_back( 6 );
+
+ ASSERT_EQUALS( 2 , a.size() );
+
+ FastArray<int>::iterator i = a.begin();
+ ASSERT( i != a.end() );
+ ASSERT_EQUALS( 5 , *i );
+ ++i;
+ ASSERT( i != a.end() );
+ ASSERT_EQUALS( 6 , *i );
+ ++i;
+ ASSERT( i == a.end() );
+ }
+ };
+ };
+
+ class ThreadSafeStringTest {
+ public:
+ void run() {
+ ThreadSafeString s;
+ s = "eliot";
+ ASSERT_EQUALS( s , "eliot" );
+ ASSERT( s != "eliot2" );
+
+ ThreadSafeString s2 = s;
+ ASSERT_EQUALS( s2 , "eliot" );
+
+
+ {
+ string foo;
+ {
+ ThreadSafeString bar;
+ bar = "eliot2";
+ foo = bar.toString();
+ }
+ ASSERT_EQUALS( "eliot2" , foo );
+ }
+ }
+ };
+
+ class LexNumCmp {
+ public:
+ void run() {
+
+ ASSERT( ! isNumber( (char)255 ) );
+
+ ASSERT_EQUALS( 0, lexNumCmp( "a", "a" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "aa" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "aa", "a" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "b" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "100", "50" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "50", "100" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "b", "a" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "aa", "aa" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "aa", "ab" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "ab", "aa" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "0", "a" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a0", "aa" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "0" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "aa", "a0" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "0" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "10", "10" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "1", "10" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "10", "1" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "11", "10" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "10", "11" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "f11f", "f10f" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "f10f", "f11f" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "f11f", "f111" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "f111", "f11f" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "f12f", "f12g" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "f12g", "f12f" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "aa{", "aab" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "aa{", "aa1" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a11" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a1{a", "a1{" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a1{a" ) );
+ ASSERT_EQUALS( 1, lexNumCmp("21", "11") );
+ ASSERT_EQUALS( -1, lexNumCmp("11", "21") );
+
+ ASSERT_EQUALS( -1 , lexNumCmp( "a.0" , "a.1" ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "a.0.b" , "a.1" ) );
+
+ ASSERT_EQUALS( -1 , lexNumCmp( "b." , "b.|" ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "b.0e" , (string("b.") + (char)255).c_str() ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "b." , "b.0e" ) );
+
+ ASSERT_EQUALS( 0, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234"));
+ ASSERT_EQUALS( 0, lexNumCmp( "000238947219478347782934718234", "238947219478347782934718234"));
+ ASSERT_EQUALS( 1, lexNumCmp( "000238947219478347782934718235", "238947219478347782934718234"));
+ ASSERT_EQUALS( -1, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234.1"));
+ ASSERT_EQUALS( 0, lexNumCmp( "238", "000238"));
+ ASSERT_EQUALS( 0, lexNumCmp( "002384", "0002384"));
+ ASSERT_EQUALS( 0, lexNumCmp( "00002384", "0002384"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "0"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0000", "0"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "000"));
+ ASSERT_EQUALS( -1, lexNumCmp( "0000", "0.0"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2380", "238"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "2384"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "02384"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "002384"));
+ ASSERT_EQUALS( -1, lexNumCmp( "123.234.4567", "00238"));
+ ASSERT_EQUALS( 0, lexNumCmp( "123.234", "00123.234"));
+ ASSERT_EQUALS( 0, lexNumCmp( "a.123.b", "a.00123.b"));
+ ASSERT_EQUALS( 1, lexNumCmp( "a.123.b", "a.b.00123.b"));
+ ASSERT_EQUALS( -1, lexNumCmp( "a.00.0", "a.0.1"));
+ ASSERT_EQUALS( 0, lexNumCmp( "01.003.02", "1.3.2"));
+ ASSERT_EQUALS( -1, lexNumCmp( "1.3.2", "10.300.20"));
+ ASSERT_EQUALS( 0, lexNumCmp( "10.300.20", "000000000000010.0000300.000000020"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0000a", "0a"));
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "0a"));
+ ASSERT_EQUALS( -1, lexNumCmp( "000a", "001a"));
+ ASSERT_EQUALS( 0, lexNumCmp( "010a", "0010a"));
+
+ ASSERT_EQUALS( -1 , lexNumCmp( "a0" , "a00" ) );
+ ASSERT_EQUALS( 0 , lexNumCmp( "a.0" , "a.00" ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "a.b.c.d0" , "a.b.c.d00" ) );
+ ASSERT_EQUALS( 1 , lexNumCmp( "a.b.c.0.y" , "a.b.c.00.x" ) );
+
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "a-" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a-", "a" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "a-", "a-" ) );
+
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "a-c" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a-c", "a" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "a-c", "a-c" ) );
+
+ ASSERT_EQUALS( 1, lexNumCmp( "a-c.t", "a.t" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a.t", "a-c.t" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "a-c.t", "a-c.t" ) );
+
+ ASSERT_EQUALS( 1, lexNumCmp( "ac.t", "a.t" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a.t", "ac.t" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "ac.t", "ac.t" ) );
+ }
+ };
+
+ class DatabaseValidNames {
+ public:
+ void run() {
+ ASSERT( NamespaceString::validDBName( "foo" ) );
+ ASSERT( ! NamespaceString::validDBName( "foo/bar" ) );
+ ASSERT( ! NamespaceString::validDBName( "foo bar" ) );
+ ASSERT( ! NamespaceString::validDBName( "foo.bar" ) );
+
+ ASSERT( NamespaceString::normal( "asdads" ) );
+ ASSERT( ! NamespaceString::normal( "asda$ds" ) );
+ ASSERT( NamespaceString::normal( "local.oplog.$main" ) );
+ }
+ };
+
+ class DatabaseOwnsNS {
+ public:
+ void run() {
+ dblock lk;
+ bool isNew = false;
+ // this leaks as ~Database is private
+ // if that changes, should put this on the stack
+ {
+ Database * db = new Database( "dbtests_basictests_ownsns" , isNew );
+ assert( isNew );
+
+ ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x" ) );
+ ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x.y" ) );
+ ASSERT( ! db->ownsNS( "dbtests_basictests_ownsn.x.y" ) );
+ ASSERT( ! db->ownsNS( "dbtests_basictests_ownsnsa.x.y" ) );
+ }
+ }
+ };
+
+ class NSValidNames {
+ public:
+ void run() {
+ ASSERT( isValidNS( "test.foo" ) );
+ ASSERT( ! isValidNS( "test." ) );
+ ASSERT( ! isValidNS( "test" ) );
+ }
+ };
+
+ class PtrTests {
+ public:
+ void run() {
+ scoped_ptr<int> p1 (new int(1));
+ boost::shared_ptr<int> p2 (new int(2));
+ scoped_ptr<const int> p3 (new int(3));
+ boost::shared_ptr<const int> p4 (new int(4));
+
+ //non-const
+ ASSERT_EQUALS( p1.get() , ptr<int>(p1) );
+ ASSERT_EQUALS( p2.get() , ptr<int>(p2) );
+ ASSERT_EQUALS( p2.get() , ptr<int>(p2.get()) ); // T* constructor
+ ASSERT_EQUALS( p2.get() , ptr<int>(ptr<int>(p2)) ); // copy constructor
+ ASSERT_EQUALS( *p2 , *ptr<int>(p2));
+ ASSERT_EQUALS( p2.get() , ptr<boost::shared_ptr<int> >(&p2)->get() ); // operator->
+
+ //const
+ ASSERT_EQUALS( p1.get() , ptr<const int>(p1) );
+ ASSERT_EQUALS( p2.get() , ptr<const int>(p2) );
+ ASSERT_EQUALS( p2.get() , ptr<const int>(p2.get()) );
+ ASSERT_EQUALS( p3.get() , ptr<const int>(p3) );
+ ASSERT_EQUALS( p4.get() , ptr<const int>(p4) );
+ ASSERT_EQUALS( p4.get() , ptr<const int>(p4.get()) );
+ ASSERT_EQUALS( p2.get() , ptr<const int>(ptr<const int>(p2)) );
+ ASSERT_EQUALS( p2.get() , ptr<const int>(ptr<int>(p2)) ); // constizing copy constructor
+ ASSERT_EQUALS( *p2 , *ptr<int>(p2));
+ ASSERT_EQUALS( p2.get() , ptr<const boost::shared_ptr<int> >(&p2)->get() );
+
+ //bool context
+ ASSERT( ptr<int>(p1) );
+ ASSERT( !ptr<int>(NULL) );
+ ASSERT( !ptr<int>() );
+
+#if 0
+ // These shouldn't compile
+ ASSERT_EQUALS( p3.get() , ptr<int>(p3) );
+ ASSERT_EQUALS( p4.get() , ptr<int>(p4) );
+ ASSERT_EQUALS( p2.get() , ptr<int>(ptr<const int>(p2)) );
+#endif
+ }
+ };
+
+ struct StringSplitterTest {
+
+ void test( string s ) {
+ vector<string> v = StringSplitter::split( s , "," );
+ ASSERT_EQUALS( s , StringSplitter::join( v , "," ) );
+ }
+
+ void run() {
+ test( "a" );
+ test( "a,b" );
+ test( "a,b,c" );
+ }
+ };
+
+ struct IsValidUTF8Test {
+// macros used to get valid line numbers
+#define good(s) ASSERT(isValidUTF8(s));
+#define bad(s) ASSERT(!isValidUTF8(s));
+
+ void run() {
+ good("A");
+ good("\xC2\xA2"); // cent: ¢
+ good("\xE2\x82\xAC"); // euro: €
+ good("\xF0\x9D\x90\x80"); // Blackboard A: 𝐀
+
+ //abrupt end
+ bad("\xC2");
+ bad("\xE2\x82");
+ bad("\xF0\x9D\x90");
+ bad("\xC2 ");
+ bad("\xE2\x82 ");
+ bad("\xF0\x9D\x90 ");
+
+ //too long
+ bad("\xF8\x80\x80\x80\x80");
+ bad("\xFC\x80\x80\x80\x80\x80");
+ bad("\xFE\x80\x80\x80\x80\x80\x80");
+ bad("\xFF\x80\x80\x80\x80\x80\x80\x80");
+
+ bad("\xF5\x80\x80\x80"); // U+140000 > U+10FFFF
+        bad("\x80"); // can't start with continuation byte
+ bad("\xC0\x80"); // 2-byte version of ASCII NUL
+#undef good
+#undef bad
+ }
+ };
+
+
+ class QueueTest {
+ public:
+ void run() {
+ BlockingQueue<int> q;
+ Timer t;
+ int x;
+ ASSERT( ! q.blockingPop( x , 5 ) );
+ ASSERT( t.seconds() > 3 && t.seconds() < 9 );
+
+ }
+ };
+
+ class StrTests {
+ public:
+
+ void run() {
+ ASSERT_EQUALS( 1u , str::count( "abc" , 'b' ) );
+ ASSERT_EQUALS( 3u , str::count( "babab" , 'b' ) );
+ }
+
+ };
+
+ class HostAndPortTests {
+ public:
+ void run() {
+ HostAndPort a( "x1" , 1000 );
+ HostAndPort b( "x1" , 1000 );
+ HostAndPort c( "x1" , 1001 );
+ HostAndPort d( "x2" , 1000 );
+
+ ASSERT( a == b );
+ ASSERT( a != c );
+ ASSERT( a != d );
+
+ }
+ };
+
+ class RelativePathTest {
+ public:
+ void run() {
+ RelativePath a = RelativePath::fromRelativePath( "a" );
+ RelativePath b = RelativePath::fromRelativePath( "a" );
+ RelativePath c = RelativePath::fromRelativePath( "b" );
+ RelativePath d = RelativePath::fromRelativePath( "a/b" );
+
+
+ ASSERT( a == b );
+ ASSERT( a != c );
+ ASSERT( a != d );
+ ASSERT( c != d );
+ }
+ };
+
+ class CmdLineParseConfigTest {
+ public:
+ void run() {
+ stringstream ss1;
+ istringstream iss1("");
+ CmdLine::parseConfigFile( iss1, ss1 );
+ stringstream ss2;
+ istringstream iss2("password=\'foo bar baz\'");
+ CmdLine::parseConfigFile( iss2, ss2 );
+ stringstream ss3;
+ istringstream iss3("\t this = false \n#that = true\n #another = whocares\n\n other = monkeys ");
+ CmdLine::parseConfigFile( iss3, ss3 );
+
+ ASSERT( ss1.str().compare("\n") == 0 );
+ ASSERT( ss2.str().compare("password=\'foo bar baz\'\n\n") == 0 );
+ ASSERT( ss3.str().compare("\n other = monkeys \n\n") == 0 );
+ }
+ };
+
+ struct CompressionTest1 {
+ void run() {
+ const char * c = "this is a test";
+ std::string s;
+ size_t len = compress(c, strlen(c)+1, &s);
+ assert( len > 0 );
+
+ std::string out;
+ bool ok = uncompress(s.c_str(), s.size(), &out);
+ assert(ok);
+ assert( strcmp(out.c_str(), c) == 0 );
+ }
+ } ctest1;
+
+
+ class All : public Suite {
+ public:
+ All() : Suite( "basic" ) {
+ }
+
+ void setupTests() {
+ add< Rarely >();
+ add< Base64Tests >();
+
+ add< stringbuildertests::simple1 >();
+ add< stringbuildertests::simple2 >();
+ add< stringbuildertests::reset1 >();
+ add< stringbuildertests::reset2 >();
+
+ add< sleeptest >();
+ add< AssertTests >();
+
+ add< ArrayTests::basic1 >();
+ add< LexNumCmp >();
+
+ add< DatabaseValidNames >();
+ add< DatabaseOwnsNS >();
+
+ add< NSValidNames >();
+
+ add< PtrTests >();
+
+ add< StringSplitterTest >();
+ add< IsValidUTF8Test >();
+
+ add< QueueTest >();
+
+ add< StrTests >();
+
+ add< HostAndPortTests >();
+ add< RelativePathTest >();
+ add< CmdLineParseConfigTest >();
+
+ add< CompressionTest1 >();
+ }
+ } myall;
+
+} // namespace BasicTests
+
diff --git a/src/mongo/dbtests/btreetests.cpp b/src/mongo/dbtests/btreetests.cpp
new file mode 100644
index 00000000000..efa42b1d5c1
--- /dev/null
+++ b/src/mongo/dbtests/btreetests.cpp
@@ -0,0 +1,59 @@
+// btreetests.cpp : Btree unit tests
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "../db/db.h"
+#include "../db/btree.h"
+
+#include "dbtests.h"
+
+#define BtreeBucket BtreeBucket<V0>
+#define btree btree<V0>
+#define btreemod btreemod<V0>
+#define Continuation Continuation<V0>
+#define testName "btree"
+#define BTVERSION 0
+namespace BtreeTests0 {
+ #include "btreetests.inl"
+}
+
+#undef BtreeBucket
+#undef btree
+#undef btreemod
+#undef Continuation
+#define BtreeBucket BtreeBucket<V1>
+#define btree btree<V1>
+#define btreemod btreemod<V1>
+#define Continuation Continuation<V1>
+#undef testName
+#define testName "btree1"
+#undef BTVERSION
+#define BTVERSION 1
+namespace BtreeTests1 {
+ #include "btreetests.inl"
+}
+
+#undef testName
+#define testName "btree1_twostep"
+#define TESTTWOSTEP 1
+
+namespace BtreeTests2 {
+ #include "btreetests.inl"
+}
diff --git a/src/mongo/dbtests/btreetests.inl b/src/mongo/dbtests/btreetests.inl
new file mode 100644
index 00000000000..824313e6a54
--- /dev/null
+++ b/src/mongo/dbtests/btreetests.inl
@@ -0,0 +1,1713 @@
    // _KeyNode binds to whichever BtreeBucket version (V0 or V1) the
    // including .cpp selected via the BtreeBucket macro.
    typedef BtreeBucket::_KeyNode _KeyNode;
+
+ const char* ns() {
+ return "unittests.btreetests";
+ }
+
    // dummy, valid record loc
    // Every test key points at this one fake location; the tests exercise
    // only key ordering, so the referenced record never needs to exist.
    const DiskLoc recordLoc() {
        return DiskLoc( 0, 2 );
    }
+
    /**
     * RAII fixture: creates the { a: 1 } test index (named "testIndex") with
     * the btree version under test (BTVERSION), and drops the collection on
     * teardown so each test starts from an empty tree.
     */
    class Ensure {
    public:
        Ensure() {
            _c.ensureIndex( ns(), BSON( "a" << 1 ), false, "testIndex",
                            false, // given two versions not sure if cache true would mess us up...
                            false, BTVERSION);
        }
        ~Ensure() {
            _c.dropCollection( ns() );
            //_c.dropIndexes( ns() );
        }
    private:
        DBDirectClient _c;
    };
+
    /**
     * Common base for the btree tests: acquires the write lock and a client
     * context, and provides helpers for inserting, removing, and locating
     * keys in the test index created by Ensure.
     */
    class Base : public Ensure {
    public:
        Base() :
            _context( ns() ) {
            {
                // Deliberate assignment inside assert(): verifies that
                // assert() actually evaluates its argument (i.e. asserts are
                // not compiled out); massert fires if it was a no-op.
                bool f = false;
                assert( f = true );
                massert( 10402 , "assert is misdefined", f);
            }
        }
        virtual ~Base() {}
        // Returns a string of length 'len' built by cycling through the
        // 16-char zero-padded hex representation of n.
        static string bigNumString( long long n, int len = 800 ) {
            char sub[17];
            sprintf( sub, "%.16llx", n );
            string val( len, ' ' );
            for( int i = 0; i < len; ++i ) {
                val[ i ] = sub[ i % 16 ];
            }
            return val;
        }
    protected:
        // Head bucket of the test index.
        const BtreeBucket* bt() {
            return id().head.btree();
        }
        // DiskLoc of the head bucket.
        DiskLoc dl() {
            return id().head;
        }
        // The test index's details; slot 1 is "testIndex" (slot 0 is
        // presumably the _id index created with the collection).
        IndexDetails& id() {
            NamespaceDetails *nsd = nsdetails( ns() );
            assert( nsd );
            return nsd->idx( 1 );
        }
        // Asserts the tree is structurally valid and holds exactly nKeys keys.
        void checkValid( int nKeys ) {
            ASSERT( bt() );
            ASSERT( bt()->isHead() );
            bt()->assertValid( order(), true );
            ASSERT_EQUALS( nKeys, bt()->fullValidate( dl(), order(), 0, true ) );
        }
        void dump() {
            bt()->dumpTree( dl(), order() );
        }
        // Inserts key via the two-step continuation path in TESTTWOSTEP
        // builds, or the ordinary single-call path otherwise.
        void insert( BSONObj &key ) {
            const BtreeBucket *b = bt();

#if defined(TESTTWOSTEP)
            {
                Continuation c(dl(), recordLoc(), key, Ordering::make(order()), id());
                b->twoStepInsert(dl(), c, true);
                c.stepTwo();
            }
#else
            {
                b->bt_insert( dl(), recordLoc(), key, Ordering::make(order()), true, id(), true );
            }
#endif
            getDur().commitIfNeeded();
        }
        // Removes key; returns true if a matching key was found and removed.
        bool unindex( BSONObj &key ) {
            getDur().commitIfNeeded();
            return bt()->unindex( dl(), id(), key, recordLoc() );
        }
        // Builds { a: <n copies of c> } - a key of adjustable size.
        static BSONObj simpleKey( char c, int n = 1 ) {
            BSONObjBuilder builder;
            string val( n, c );
            builder.append( "a", val );
            return builder.obj();
        }
        // Locates key and asserts the resulting position, found flag, and
        // bucket location match expectations.
        void locate( BSONObj &key, int expectedPos,
                     bool expectedFound, const DiskLoc &expectedLocation,
                     int direction = 1 ) {
            int pos;
            bool found;
            DiskLoc location =
                bt()->locate( id(), dl(), key, Ordering::make(order()), pos, found, recordLoc(), direction );
            ASSERT_EQUALS( expectedFound, found );
            ASSERT( location == expectedLocation );
            ASSERT_EQUALS( expectedPos, pos );
        }
        // True if key is found when scanning in the given direction.
        bool present( BSONObj &key, int direction ) {
            int pos;
            bool found;
            bt()->locate( id(), dl(), key, Ordering::make(order()), pos, found, recordLoc(), direction );
            return found;
        }
        BSONObj order() {
            return id().keyPattern();
        }
        // Child bucket i of b; i == nKeys() selects the rightmost child.
        const BtreeBucket *child( const BtreeBucket *b, int i ) {
            assert( i <= b->nKeys() );
            DiskLoc d;
            if ( i == b->nKeys() ) {
                d = b->getNextChild();
            }
            else {
                d = b->keyNode( i ).prevChildBucket;
            }
            assert( !d.isNull() );
            return d.btree();
        }
        void checkKey( char i ) {
            stringstream ss;
            ss << i;
            checkKey( ss.str() );
        }
        // Asserts k is found scanning both forward and backward.
        void checkKey( const string &k ) {
            BSONObj key = BSON( "" << k );
// log() << "key: " << key << endl;
            ASSERT( present( key, 1 ) );
            ASSERT( present( key, -1 ) );
        }
    private:
        dblock lk_;
        Client::Context _context;
    };
+
    /** A freshly created index is a valid tree containing zero keys. */
    class Create : public Base {
    public:
        void run() {
            checkValid( 0 );
        }
    };
+
    /** Insert one key, locate it, remove it, and verify the tree is empty. */
    class SimpleInsertDelete : public Base {
    public:
        void run() {
            BSONObj key = simpleKey( 'z' );
            insert( key );

            checkValid( 1 );
            locate( key, 0, true, dl() );

            unindex( key );

            checkValid( 0 );
            locate( key, 0, false, DiskLoc() );
        }
    };
+
    /**
     * Fills one bucket with alternating short (1 byte) and long (800 byte)
     * keys so that one side of the eventual split is much heavier, then lets
     * the subclass verify how keys were distributed across the two halves.
     */
    class SplitUnevenBucketBase : public Base {
    public:
        virtual ~SplitUnevenBucketBase() {}
        void run() {
            for ( int i = 0; i < 10; ++i ) {
                BSONObj shortKey = simpleKey( shortToken( i ), 1 );
                insert( shortKey );
                BSONObj longKey = simpleKey( longToken( i ), 800 );
                insert( longKey );
            }
            checkValid( 20 );
            ASSERT_EQUALS( 1, bt()->nKeys() );
            checkSplit();
        }
    protected:
        virtual char shortToken( int i ) const = 0;
        virtual char longToken( int i ) const = 0;
        // Tokens ascending from 'a' (low end of the key range).
        static char leftToken( int i ) {
            return 'a' + i;
        }
        // Tokens descending from 'z' (high end of the key range).
        static char rightToken( int i ) {
            return 'z' - i;
        }
        virtual void checkSplit() = 0;
    };
+
    /** Long keys on the right: the split leaves 15 keys left, 4 right. */
    class SplitRightHeavyBucket : public SplitUnevenBucketBase {
    private:
        virtual char shortToken( int i ) const {
            return leftToken( i );
        }
        virtual char longToken( int i ) const {
            return rightToken( i );
        }
        virtual void checkSplit() {
            ASSERT_EQUALS( 15, child( bt(), 0 )->nKeys() );
            ASSERT_EQUALS( 4, child( bt(), 1 )->nKeys() );
        }
    };

    /** Long keys on the left: the split leaves 4 keys left, 15 right. */
    class SplitLeftHeavyBucket : public SplitUnevenBucketBase {
    private:
        virtual char shortToken( int i ) const {
            return rightToken( i );
        }
        virtual char longToken( int i ) const {
            return leftToken( i );
        }
        virtual void checkSplit() {
            ASSERT_EQUALS( 4, child( bt(), 0 )->nKeys() );
            ASSERT_EQUALS( 15, child( bt(), 1 )->nKeys() );
        }
    };
+
    /**
     * Locating keys absent from the tree ('a','c','e','g' when only
     * 'b','d','f' are present) should land on the neighboring key in the
     * scan direction, or nowhere when past the end.
     */
    class MissingLocate : public Base {
    public:
        void run() {
            for ( int i = 0; i < 3; ++i ) {
                BSONObj k = simpleKey( 'b' + 2 * i );
                insert( k );
            }

            locate( 1, 'a', 'b', dl() );
            locate( 1, 'c', 'd', dl() );
            locate( 1, 'e', 'f', dl() );
            locate( 1, 'g', 'g' + 1, DiskLoc() ); // of course, 'h' isn't in the index.

            // old behavior
            // locate( -1, 'a', 'b', dl() );
            // locate( -1, 'c', 'd', dl() );
            // locate( -1, 'e', 'f', dl() );
            // locate( -1, 'g', 'f', dl() );

            locate( -1, 'a', 'a' - 1, DiskLoc() ); // of course, 'a' - 1 isn't in the index
            locate( -1, 'c', 'b', dl() );
            locate( -1, 'e', 'd', dl() );
            locate( -1, 'g', 'f', dl() );
        }
    private:
        // Expects the search for 'token' to stop at 'expectedMatch' in the
        // given direction, with no exact match found.
        void locate( int direction, char token, char expectedMatch,
                     DiskLoc expectedLocation ) {
            BSONObj k = simpleKey( token );
            int expectedPos = ( expectedMatch - 'b' ) / 2;
            Base::locate( k, expectedPos, false, expectedLocation, direction );
        }
    };
+
    /**
     * Missing-key locates across bucket boundaries: 800-byte keys force a
     * multi-bucket tree, and the out-of-order final inserts (9 then 8)
     * shape the tree so the straddling searches cross buckets.
     */
    class MissingLocateMultiBucket : public Base {
    public:
        void run() {
            for ( int i = 0; i < 8; ++i ) {
                insert( i );
            }
            insert( 9 );
            insert( 8 );
// dump();
            BSONObj straddle = key( 'i' );
            locate( straddle, 0, false, dl(), 1 );
            straddle = key( 'k' );
            locate( straddle, 0, false, dl(), -1 );
        }
    private:
        BSONObj key( char c ) {
            return simpleKey( c, 800 );
        }
        void insert( int i ) {
            BSONObj k = key( 'b' + 2 * i );
            Base::insert( k );
        }
    };
+
    /**
     * Regression test for SERVER-983: straddling locates in both directions
     * over a multi-bucket tree of large keys.
     */
    class SERVER983 : public Base {
    public:
        void run() {
            for ( int i = 0; i < 10; ++i ) {
                insert( i );
            }
// dump();
            BSONObj straddle = key( 'o' );
            locate( straddle, 0, false, dl(), 1 );
            straddle = key( 'q' );
            locate( straddle, 0, false, dl(), -1 );
        }
    private:
        BSONObj key( char c ) {
            return simpleKey( c, 800 );
        }
        void insert( int i ) {
            BSONObj k = key( 'b' + 2 * i );
            Base::insert( k );
        }
    };
+
    /**
     * After deleting and reinserting a key, the reinserted copy must occupy
     * a new slot (here: the rightmost child bucket) rather than reusing the
     * old unused-marked entry.
     */
    class DontReuseUnused : public Base {
    public:
        void run() {
            for ( int i = 0; i < 10; ++i ) {
                insert( i );
            }
// dump();
            BSONObj root = key( 'p' );
            unindex( root );
            Base::insert( root );
            locate( root, 0, true, bt()->getNextChild(), 1 );
        }
    private:
        BSONObj key( char c ) {
            return simpleKey( c, 800 );
        }
        void insert( int i ) {
            BSONObj k = key( 'b' + 2 * i );
            Base::insert( k );
        }
    };
+
    /**
     * Builds a large tree, deletes every key (leaving unused slots behind),
     * then reinserts a small range and verifies the reinsertion does not
     * increase the number of unused entries (i.e. packing reclaims space).
     */
    class PackUnused : public Base {
    public:
        void run() {
            for ( long long i = 0; i < 1000000; i += 1000 ) {
                insert( i );
            }
            string orig, after;
            {
                stringstream ss;
                bt()->shape( ss );
                orig = ss.str();
            }
            vector< string > toDel;
            vector< string > other;
            BSONObjBuilder start;
            start.appendMinKey( "a" );
            BSONObjBuilder end;
            end.appendMaxKey( "a" );
            // Partition the keys by whether their node has a child bucket,
            // so internal keys are deleted first.
            auto_ptr< BtreeCursor > c( BtreeCursor::make( nsdetails( ns() ), 1, id(), start.done(), end.done(), false, 1 ) );
            while( c->ok() ) {
                if ( c->curKeyHasChild() ) {
                    toDel.push_back( c->currKey().firstElement().valuestr() );
                }
                else {
                    other.push_back( c->currKey().firstElement().valuestr() );
                }
                c->advance();
            }
            ASSERT( toDel.size() > 0 );
            for( vector< string >::const_iterator i = toDel.begin(); i != toDel.end(); ++i ) {
                BSONObj o = BSON( "a" << *i );
                unindex( o );
            }
            ASSERT( other.size() > 0 );
            for( vector< string >::const_iterator i = other.begin(); i != other.end(); ++i ) {
                BSONObj o = BSON( "a" << *i );
                unindex( o );
            }

            long long unused = 0;
            ASSERT_EQUALS( 0, bt()->fullValidate( dl(), order(), &unused, true ) );

            for ( long long i = 50000; i < 50100; ++i ) {
                insert( i );
            }

            long long unused2 = 0;
            ASSERT_EQUALS( 100, bt()->fullValidate( dl(), order(), &unused2, true ) );

// log() << "old unused: " << unused << ", new unused: " << unused2 << endl;
//
            ASSERT( unused2 <= unused );
        }
    protected:
        // Inserts { a: bigNumString(n) }.
        void insert( long long n ) {
            string val = bigNumString( n );
            BSONObj k = BSON( "a" << val );
            Base::insert( k );
        }
    };
+
    /**
     * Inserting while an internal key's child pointer has been nulled and
     * the key marked unused must not drop that (still referenced) key.
     */
    class DontDropReferenceKey : public PackUnused {
    public:
        void run() {
            // with 80 root node is full
            for ( long long i = 0; i < 80; i += 1 ) {
                insert( i );
            }

            BSONObjBuilder start;
            start.appendMinKey( "a" );
            BSONObjBuilder end;
            end.appendMaxKey( "a" );
            // Find the first key greater than the root bucket's first key.
            BSONObj l = bt()->keyNode( 0 ).key.toBson();
            string toInsert;
            auto_ptr< BtreeCursor > c( BtreeCursor::make( nsdetails( ns() ), 1, id(), start.done(), end.done(), false, 1 ) );
            while( c->ok() ) {
                if ( c->currKey().woCompare( l ) > 0 ) {
                    toInsert = c->currKey().firstElement().valuestr();
                    break;
                }
                c->advance();
            }
            // too much work to try to make this happen through inserts and deletes
            // we are intentionally manipulating the btree bucket directly here
            BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >( &bt()->keyNode( 1 ).prevChildBucket );
            getDur().writing(L)->Null();
            getDur().writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
            BSONObj k = BSON( "a" << toInsert );
            Base::insert( k );
        }
    };
+
    /**
     * Inserts ten large keys (filling three buckets), lets the subclass
     * delete some, then verifies the buckets merged back into one with no
     * unused keys remaining.
     */
    class MergeBuckets : public Base {
    public:
        virtual ~MergeBuckets() {}
        void run() {
            for ( int i = 0; i < 10; ++i ) {
                insert( i );
            }
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
            int expectedCount = 10 - unindexKeys();
// dump();
            ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
            long long unused = 0;
            ASSERT_EQUALS( expectedCount, bt()->fullValidate( dl(), order(), &unused, true ) );
            ASSERT_EQUALS( 0, unused );
        }
    protected:
        BSONObj key( char c ) {
            return simpleKey( c, 800 );
        }
        void insert( int i ) {
            BSONObj k = key( 'b' + 2 * i );
            Base::insert( k );
        }
        // Removes some keys; returns how many were removed.
        virtual int unindexKeys() = 0;
    };
+
+ class MergeBucketsLeft : public MergeBuckets {
+ virtual int unindexKeys() {
+ BSONObj k = key( 'b' );
+ unindex( k );
+ k = key( 'b' + 2 );
+ unindex( k );
+ k = key( 'b' + 4 );
+ unindex( k );
+ k = key( 'b' + 6 );
+ unindex( k );
+ return 4;
+ }
+ };
+
    /** Removes only the rightmost key, triggering a merge from the right. */
    class MergeBucketsRight : public MergeBuckets {
        virtual int unindexKeys() {
            BSONObj k = key( 'b' + 2 * 9 );
            unindex( k );
            return 1;
        }
    };
+
+ // deleting from head won't coalesce yet
+// class MergeBucketsHead : public MergeBuckets {
+// virtual BSONObj unindexKey() { return key( 'p' ); }
+// };
+
    /**
     * A merge triggered by deleting the last key must not replace the head
     * bucket itself (record count drops from 4 to 3, not to 1).
     */
    class MergeBucketsDontReplaceHead : public Base {
    public:
        void run() {
            for ( int i = 0; i < 18; ++i ) {
                insert( i );
            }
            // dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
            BSONObj k = key( 'a' + 17 );
            unindex( k );
            ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
            long long unused = 0;
            ASSERT_EQUALS( 17, bt()->fullValidate( dl(), order(), &unused, true ) );
            ASSERT_EQUALS( 0, unused );
        }
    private:
        BSONObj key( char c ) {
            return simpleKey( c, 800 );
        }
        void insert( int i ) {
            BSONObj k = key( 'a' + i );
            Base::insert( k );
        }
    };
+
+ // Tool to construct custom trees for tests.
+ class ArtificialTree : public BtreeBucket {
+ public:
+ void push( const BSONObj &key, const DiskLoc &child ) {
+ KeyOwned k(key);
+ pushBack( dummyDiskLoc(), k, Ordering::make( BSON( "a" << 1 ) ), child );
+ }
+ void setNext( const DiskLoc &child ) {
+ nextChild = child;
+ }
+ static DiskLoc make( IndexDetails &id ) {
+ DiskLoc ret = addBucket( id );
+ is( ret )->init();
+ getDur().commitIfNeeded();
+ return ret;
+ }
+ static ArtificialTree *is( const DiskLoc &l ) {
+ return static_cast< ArtificialTree * >( l.btreemod() );
+ }
+ static DiskLoc makeTree( const string &spec, IndexDetails &id ) {
+ return makeTree( fromjson( spec ), id );
+ }
+ static DiskLoc makeTree( const BSONObj &spec, IndexDetails &id ) {
+ DiskLoc node = make( id );
+ ArtificialTree *n = ArtificialTree::is( node );
+ BSONObjIterator i( spec );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ DiskLoc child;
+ if ( e.type() == Object ) {
+ child = makeTree( e.embeddedObject(), id );
+ }
+ if ( e.fieldName() == string( "_" ) ) {
+ n->setNext( child );
+ }
+ else {
+ n->push( BSON( "" << expectedKey( e.fieldName() ) ), child );
+ }
+ }
+ n->fixParentPtrs( node );
+ return node;
+ }
+ static void setTree( const string &spec, IndexDetails &id ) {
+ set( makeTree( spec, id ), id );
+ }
+ static void set( const DiskLoc &l, IndexDetails &id ) {
+ ArtificialTree::is( id.head )->deallocBucket( id.head, id );
+ getDur().writingDiskLoc(id.head) = l;
+ }
+ static string expectedKey( const char *spec ) {
+ if ( spec[ 0 ] != '$' ) {
+ return spec;
+ }
+ char *endPtr;
+ // parsing a long long is a pain, so just allow shorter keys for now
+ unsigned long long num = strtol( spec + 1, &endPtr, 16 );
+ int len = 800;
+ if( *endPtr == '$' ) {
+ len = strtol( endPtr + 1, 0, 16 );
+ }
+ return Base::bigNumString( num, len );
+ }
+ static void checkStructure( const BSONObj &spec, const IndexDetails &id, const DiskLoc node ) {
+ ArtificialTree *n = ArtificialTree::is( node );
+ BSONObjIterator j( spec );
+ for( int i = 0; i < n->n; ++i ) {
+ ASSERT( j.more() );
+ BSONElement e = j.next();
+ KeyNode kn = n->keyNode( i );
+ string expected = expectedKey( e.fieldName() );
+ ASSERT( present( id, BSON( "" << expected ), 1 ) );
+ ASSERT( present( id, BSON( "" << expected ), -1 ) );
+ ASSERT_EQUALS( expected, kn.key.toBson().firstElement().valuestr() );
+ if ( kn.prevChildBucket.isNull() ) {
+ ASSERT( e.type() == jstNULL );
+ }
+ else {
+ ASSERT( e.type() == Object );
+ checkStructure( e.embeddedObject(), id, kn.prevChildBucket );
+ }
+ }
+ if ( n->nextChild.isNull() ) {
+ // maybe should allow '_' field with null value?
+ ASSERT( !j.more() );
+ }
+ else {
+ BSONElement e = j.next();
+ ASSERT_EQUALS( string( "_" ), e.fieldName() );
+ ASSERT( e.type() == Object );
+ checkStructure( e.embeddedObject(), id, n->nextChild );
+ }
+ ASSERT( !j.more() );
+ }
+ static void checkStructure( const string &spec, const IndexDetails &id ) {
+ checkStructure( fromjson( spec ), id, id.head );
+ }
+ static bool present( const IndexDetails &id, const BSONObj &key, int direction ) {
+ int pos;
+ bool found;
+ id.head.btree()->locate( id, id.head, key, Ordering::make(id.keyPattern()), pos, found, recordLoc(), direction );
+ return found;
+ }
+ int headerSize() const { return BtreeBucket::headerSize(); }
+ int packedDataSize( int pos ) const { return BtreeBucket::packedDataSize( pos ); }
+ void fixParentPtrs( const DiskLoc &thisLoc ) { BtreeBucket::fixParentPtrs( thisLoc ); }
+ void forcePack() {
+ topSize += emptySize;
+ emptySize = 0;
+ setNotPacked();
+ }
+ private:
+ DiskLoc dummyDiskLoc() const { return DiskLoc( 0, 2 ); }
+ };
+
+ /**
+ * We could probably refactor the following tests, but it's easier to debug
+ * them in the present state.
+ */
+
    /** Deleting internal key "bb" merges its children into a single bucket. */
    class MergeBucketsDelInternal : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "bb" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{b:{a:null},d:{c:null},f:{e:null},_:{g:null}}", id() );
        }
    };

    /** Merge when the key's right sibling is the parent's rightmost child. */
    class MergeBucketsRightNull : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "bb" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}", id() );
        }
    };

    // not yet handling this case
    /** An only-child bucket is not merged up into its parent. */
    class DontMergeSingleBucket : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{d:{b:{a:null},c:null}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
            BSONObj k = BSON( "" << "c" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{d:{b:{a:null}}}", id() );
        }
    };
+
    /** Merge pulls a non-rightmost parent key down into the left child. */
    class ParentMergeNonRightToLeft : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "bb" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
            // child does not currently replace parent in this case
            ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
        }
    };

    /** Merge pulls a non-rightmost parent key down into the right child. */
    class ParentMergeNonRightToRight : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "ff" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
            // child does not currently replace parent in this case
            ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
        }
    };

    /** Neither sibling qualifies for a merge: structure and record count hold. */
    class CantMergeRightNoMerge : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "bb" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{d:{b:{a:null},cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
        }
    };
+
    /** Deleting "g" leaves no merge-eligible left sibling: no merge occurs. */
    class CantMergeLeftNoMerge : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "g" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{c:{b:{a:null}},d:null,_:{f:{e:null}}}", id() );
        }
    };

    /** When both siblings could merge, the right-side merge is chosen. */
    class MergeOption : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "ee" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{c:{b:{a:null}},_:{e:{d:null},f:null,h:{g:null}}}", id() );
        }
    };

    /** Only the left sibling qualifies: merge happens to the left. */
    class ForceMergeLeft : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "ee" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{f:{b:{a:null},c:null,e:{d:null}},ff:null,_:{h:{g:null}}}", id() );
        }
    };

    /** Only the right sibling qualifies: merge happens to the right. */
    class ForceMergeRight : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "ee" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{c:{b:{a:null}},cc:null,_:{e:{d:null},f:null,h:{g:null}}}", id() );
        }
    };
+
    /** A merge at one level cascades into a merge at the level above. */
    class RecursiveMerge : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "c" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
            // height is not currently reduced in this case
            ArtificialTree::checkStructure( "{j:{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}}", id() );
        }
    };

    /** Recursive merge where the upper merge involves the rightmost child. */
    class RecursiveMergeRightBucket : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}", id() );
// dump();
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );

            BSONObj k = BSON( "" << "c" );
            assert( unindex( k ) );
// dump();
            ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
            ArtificialTree::checkStructure( "{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}", id() );
        }
    };

    /** Rightmost children at both levels: the merge does not recurse. */
    class RecursiveMergeDoubleRightBucket : public Base {
    public:
        void run() {
            ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}", id() );
            string ns = id().indexNamespace();
            ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
            ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
            BSONObj k = BSON( "" << "c" );
            assert( unindex( k ) );
            long long keyCount = bt()->fullValidate( dl(), order(), 0, true );
            ASSERT_EQUALS( 7, keyCount );
            ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
            // no recursion currently in this case
            ArtificialTree::checkStructure( "{h:{b:{a:null},d:null,e:null,f:null},_:{i:null}}", id() );
        }
    };
+
    /**
     * Builds a two-leaf tree whose leaves are filled to exact byte sizes
     * chosen by subclasses, deletes delKeys() one at a time, and checks
     * whether the leaves merge (or not) as the subclass predicts.
     */
    class MergeSizeBase : public Base {
    public:
        MergeSizeBase() : _count() {}
        virtual ~MergeSizeBase() {}
        void run() {
            // Hand-build: root holding one separator key 'm', with left and
            // right leaf children.
            typedef ArtificialTree A;
            A::set( A::make( id() ), id() );
            A* root = A::is( dl() );
            DiskLoc left = A::make( id() );
            root->push( biggestKey( 'm' ), left );
            _count = 1;
            A* l = A::is( left );
            DiskLoc right = A::make( id() );
            root->setNext( right );
            A* r = A::is( right );
            root->fixParentPtrs( dl() );

            //ASSERT_EQUALS( bigSize(), bigSize() / 2 * 2 );
            fillToExactSize( l, leftSize(), 'a' );
            fillToExactSize( r, rightSize(), 'n' );
            ASSERT( leftAdditional() <= 2 );
            if ( leftAdditional() >= 2 ) {
                l->push( bigKey( 'k' ), DiskLoc() );
            }
            if ( leftAdditional() >= 1 ) {
                l->push( bigKey( 'l' ), DiskLoc() );
            }
            ASSERT( rightAdditional() <= 2 );
            if ( rightAdditional() >= 2 ) {
                r->push( bigKey( 'y' ), DiskLoc() );
            }
            if ( rightAdditional() >= 1 ) {
                r->push( bigKey( 'z' ), DiskLoc() );
            }
            _count += leftAdditional() + rightAdditional();

// dump();

            initCheck();
            string ns = id().indexNamespace();
            const char *keys = delKeys();
            for( const char *i = keys; *i; ++i ) {
                long long unused = 0;
                ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
                ASSERT_EQUALS( 0, unused );
                ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
                BSONObj k = bigKey( *i );
                unindex( k );
// dump();
                --_count;
            }

// dump();

            long long unused = 0;
            ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
            ASSERT_EQUALS( 0, unused );
            validate();
            if ( !merge() ) {
                ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
            }
            else {
                ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
            }
        }
    protected:
        // Number of extra big keys pushed onto the left / right leaf.
        virtual int leftAdditional() const { return 2; }
        virtual int rightAdditional() const { return 2; }
        // Hooks for subclasses to snapshot / verify additional state.
        virtual void initCheck() {}
        virtual void validate() {}
        // Exact packed byte sizes the two leaves are filled to.
        virtual int leftSize() const = 0;
        virtual int rightSize() const = 0;
        // Keys deleted, in order, during the run.
        virtual const char * delKeys() const { return "klyz"; }
        // Whether the leaves are expected to merge after the deletes.
        virtual bool merge() const { return true; }
        // Pushes keys until the bucket's packed data size is exactly
        // targetSize bytes.
        void fillToExactSize( ArtificialTree *t, int targetSize, char startKey ) {
            int size = 0;
            while( size < targetSize ) {
                int space = targetSize - size;
                int nextSize = space - sizeof( _KeyNode );
                assert( nextSize > 0 );
                BSONObj newKey = key( startKey++, nextSize );
                t->push( newKey, DiskLoc() );
                size += BtreeBucket::KeyOwned(newKey).dataSize() + sizeof( _KeyNode );
                _count += 1;
            }
            if( t->packedDataSize( 0 ) != targetSize ) {
                ASSERT_EQUALS( t->packedDataSize( 0 ), targetSize );
            }
        }
        // A key of approximately 'size' bytes, capped at bigKey's size.
        static BSONObj key( char a, int size ) {
            if ( size >= bigSize() ) {
                return bigKey( a );
            }
            return simpleKey( a, size - ( bigSize() - 801 ) );
        }
        static BSONObj bigKey( char a ) {
            return simpleKey( a, 801 );
        }
        // A key sized so the root holds exactly one key at maximum size.
        static BSONObj biggestKey( char a ) {
            int size = BtreeBucket::getKeyMax() - bigSize() + 801;
            return simpleKey( a, size );
        }
        static int bigSize() {
            return BtreeBucket::KeyOwned(bigKey( 'a' )).dataSize();
        }
        static int biggestSize() {
            return BtreeBucket::KeyOwned(biggestKey( 'a' )).dataSize();
        }
        // Running count of keys currently expected in the tree.
        int _count;
    };
+
    /** Right leaf exactly at the merge threshold: merge occurs. */
    class MergeSizeJustRightRight : public MergeSizeBase {
    protected:
        virtual int rightSize() const { return BtreeBucket::lowWaterMark() - 1; }
        virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) - ( BtreeBucket::lowWaterMark() - 1 ); }
    };

    /** Left leaf exactly at the merge threshold: merge occurs. */
    class MergeSizeJustRightLeft : public MergeSizeBase {
    protected:
        virtual int leftSize() const { return BtreeBucket::lowWaterMark() - 1; }
        virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) - ( BtreeBucket::lowWaterMark() - 1 ); }
        virtual const char * delKeys() const { return "yzkl"; }
    };

    /** One byte below the just-right split on the right: still merges. */
    class MergeSizeRight : public MergeSizeJustRightRight {
        virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() - 1; }
        virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
    };

    /** One byte below the just-right split on the left: still merges. */
    class MergeSizeLeft : public MergeSizeJustRightLeft {
        virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
        virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() - 1; }
    };

    /** Right leaf one byte above the low-water mark: no merge. */
    class NoMergeBelowMarkRight : public MergeSizeJustRightRight {
        virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
        virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() - 1; }
        virtual bool merge() const { return false; }
    };

    /** Left leaf one byte above the low-water mark: no merge. */
    class NoMergeBelowMarkLeft : public MergeSizeJustRightLeft {
        virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() - 1; }
        virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() + 1; }
        virtual bool merge() const { return false; }
    };

    /** Combined size one byte too big (right grown): no merge. */
    class MergeSizeRightTooBig : public MergeSizeJustRightLeft {
        virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
        virtual bool merge() const { return false; }
    };

    /** Combined size one byte too big (left grown): no merge. */
    class MergeSizeLeftTooBig : public MergeSizeJustRightRight {
        virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
        virtual bool merge() const { return false; }
    };
+
+ class BalanceOneLeftToRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},b:{$20:null,$30:null,$40:null,$50:null,a:null},_:{c:null}}", id() );
+ ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x40 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},b:{$10:null,$20:null,$30:null,$50:null,a:null},_:{c:null}}", id() );
+ }
+ };
+
+ class BalanceOneRightToLeft : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null},b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x3 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$20:{$1:null,$2:null,$4:null,$10:null},b:{$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
+ }
+ };
+
<test></test>
+            class BalanceThreeLeftToRight : public Base { // rebalance that shifts three keys (with their child subtrees) left-to-right
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},$9:{$8:null},$11:{$10:null},$13:{$12:null},_:{$14:null}},b:{$30:null,$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
+                    ASSERT_EQUALS( 23, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 14, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << bigNumString( 0x30 ) );
+                    // dump();
+                    ASSERT( unindex( k ) );
+                    // dump();
+                    ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) ); // one key removed, bucket count unchanged
+                    ASSERT_EQUALS( 14, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{$9:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},_:{$8:null}},b:{$11:{$10:null},$13:{$12:null},$20:{$14:null},$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() ); // $9 becomes the new separator
+                }
+            };
+
+            class BalanceThreeRightToLeft : public Base { // rebalance that shifts three keys (with their child subtrees) right-to-left
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:null,_:{$14:null}},b:{$30:{$25:null},$40:{$35:null},$50:{$45:null},$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
+                    ASSERT_EQUALS( 25, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 15, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << bigNumString( 0x5 ) );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    ASSERT_EQUALS( 24, bt()->fullValidate( dl(), order(), 0, true ) ); // one key removed, bucket count unchanged
+                    ASSERT_EQUALS( 15, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},$30:{$25:null},$40:{$35:null},_:{$45:null}},b:{$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() ); // $50 becomes the new separator
+                }
+            };
+
+            class BalanceSingleParentKey : public Base { // balance across a parent that holds just one key plus the right-child slot
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
+                    ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << bigNumString( 0x40 ) );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) ); // one key removed, bucket count unchanged
+                    ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() ); // $6 replaces $10 as the separator
+                }
+            };
+
+            class PackEmpty : public Base { // _packReadyForMod on an emptied bucket yields zero keys, zero topSize, full emptySize
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null}", id() );
+                    BSONObj k = BSON( "" << "a" );
+                    ASSERT( unindex( k ) ); // empty the bucket
+                    ArtificialTree *t = ArtificialTree::is( dl() );
+                    t->forcePack(); // clear the Packed flag so pack actually runs
+                    Tester::checkEmpty( t, id() );
+                }
+                class Tester : public ArtificialTree { // subclass used only to reach protected members
+                public:
+                    static void checkEmpty( ArtificialTree *a, const IndexDetails &id ) {
+                        Tester *t = static_cast< Tester * >( a );
+                        ASSERT_EQUALS( 0, t->n );
+                        ASSERT( !( t->flags & Packed ) ); // forcePack cleared the flag
+                        Ordering o = Ordering::make( id.keyPattern() );
+                        int zero = 0;
+                        t->_packReadyForMod( o, zero );
+                        ASSERT_EQUALS( 0, t->n );
+                        ASSERT_EQUALS( 0, t->topSize );
+                        ASSERT_EQUALS( BtreeBucket::bodySize(), t->emptySize ); // whole body reclaimed
+                        ASSERT( t->flags & Packed ); // pack set the flag again
+                    }
+                };
+            };
+
+            class PackedDataSizeEmpty : public Base { // packedDataSize of an emptied bucket is zero and does not set the Packed flag
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null}", id() );
+                    BSONObj k = BSON( "" << "a" );
+                    ASSERT( unindex( k ) ); // empty the bucket
+                    ArtificialTree *t = ArtificialTree::is( dl() );
+                    t->forcePack(); // clear the Packed flag
+                    Tester::checkEmpty( t, id() );
+                }
+                class Tester : public ArtificialTree { // subclass used only to reach protected members
+                public:
+                    static void checkEmpty( ArtificialTree *a, const IndexDetails &id ) {
+                        Tester *t = static_cast< Tester * >( a );
+                        ASSERT_EQUALS( 0, t->n );
+                        ASSERT( !( t->flags & Packed ) );
+                        int zero = 0;
+                        ASSERT_EQUALS( 0, t->packedDataSize( zero ) );
+                        ASSERT( !( t->flags & Packed ) ); // packedDataSize is read-only: flag still clear
+                    }
+                };
+            };
+
+            class BalanceSingleParentKeyPackParent : public Base { // same as BalanceSingleParentKey, but with the parent packed before the delete
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
+                    ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+                    // force parent pack
+                    ArtificialTree::is( dl() )->forcePack();
+                    BSONObj k = BSON( "" << bigNumString( 0x40 ) );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) ); // one key removed, bucket count unchanged
+                    ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() ); // identical outcome to the unpacked case
+                }
+            };
+
+            class BalanceSplitParent : public Base { // a balance that overfills the parent, forcing the parent itself to split (nrecords 4 -> 6)
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$10$10:{$1:null,$2:null,$3:null,$4:null},$100:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null,$500:null,$600:null,$700:null,$800:null,$900:null,_:{c:null}}", id() );
+                    ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << bigNumString( 0x3 ) );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    ASSERT_EQUALS( 21, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords ); // two new buckets: parent split and new root
+                    ArtificialTree::checkStructure( "{$500:{$30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},$100:{$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null},_:{$600:null,$700:null,$800:null,$900:null,_:{c:null}}}", id() );
+                }
+            };
+
+            class RebalancedSeparatorBase : public Base { // fixture: verifies which position rebalancedSeparatorPos() picks for a given tree shape
+            public:
+                void run() {
+                    ArtificialTree::setTree( treeSpec(), id() );
+                    modTree();
+                    Tester::checkSeparator( id(), expectedSeparator() );
+                }
+                virtual string treeSpec() const = 0; // tree layout under test
+                virtual int expectedSeparator() const = 0; // expected separator index
+                virtual void modTree() {} // optional mutation applied before the check
+                struct Tester : public ArtificialTree { // subclass used only to reach the protected rebalancedSeparatorPos()
+                    static void checkSeparator( const IndexDetails& id, int expected ) {
+                        ASSERT_EQUALS( expected, static_cast< Tester * >( id.head.btreemod() )->rebalancedSeparatorPos( id.head, 0 ) );
+                    }
+                };
+            };
+
+            class EvenRebalanceLeft : public RebalancedSeparatorBase { // even key count, heavy left child
+                virtual string treeSpec() const { return "{$7:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null,$6:null},_:{$8:null,$9:null,$10$31e:null}}"; }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class EvenRebalanceLeftCusp : public RebalancedSeparatorBase { // left child heavier by exactly one key
+                virtual string treeSpec() const { return "{$6:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null},_:{$7:null,$8:null,$9$31e:null,$10:null}}"; }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class EvenRebalanceRight : public RebalancedSeparatorBase { // even key count, heavy right child
+                virtual string treeSpec() const { return "{$3:{$1:null,$2$31f:null},_:{$4$31f:null,$5:null,$6:null,$7:null,$8$31e:null,$9:null,$10:null}}"; }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class EvenRebalanceRightCusp : public RebalancedSeparatorBase { // right child heavier by exactly one key
+                virtual string treeSpec() const { return "{$4$31f:{$1:null,$2$31f:null,$3:null},_:{$5:null,$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class EvenRebalanceCenter : public RebalancedSeparatorBase { // even key count, roughly balanced children
+                virtual string treeSpec() const { return "{$5:{$1:null,$2$31f:null,$3:null,$4$31f:null},_:{$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class OddRebalanceLeft : public RebalancedSeparatorBase { // odd key count, heavy left child
+                virtual string treeSpec() const { return "{$6$31f:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$7:null,$8:null,$9:null,$10:null}}"; }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class OddRebalanceRight : public RebalancedSeparatorBase { // odd key count, heavy right child
+                virtual string treeSpec() const { return "{$4:{$1:null,$2:null,$3:null},_:{$5:null,$6:null,$7:null,$8$31f:null,$9:null,$10:null}}"; }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class OddRebalanceCenter : public RebalancedSeparatorBase { // odd key count, roughly balanced children
+                virtual string treeSpec() const { return "{$5:{$1:null,$2:null,$3:null,$4:null},_:{$6:null,$7:null,$8:null,$9:null,$10$31f:null}}"; }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class RebalanceEmptyRight : public RebalancedSeparatorBase { // right child emptied by deletion before the separator check
+                virtual string treeSpec() const { return "{$a:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null,$7:null,$8:null,$9:null},_:{$b:null}}"; }
+                virtual void modTree() {
+                    BSONObj k = BSON( "" << bigNumString( 0xb ) ); // remove the sole right-child key
+                    ASSERT( unindex( k ) );
+                }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class RebalanceEmptyLeft : public RebalancedSeparatorBase { // left child emptied by deletion before the separator check
+                virtual string treeSpec() const { return "{$a:{$1:null},_:{$11:null,$12:null,$13:null,$14:null,$15:null,$16:null,$17:null,$18:null,$19:null}}"; }
+                virtual void modTree() {
+                    BSONObj k = BSON( "" << bigNumString( 0x1 ) ); // remove the sole left-child key
+                    ASSERT( unindex( k ) );
+                }
+                virtual int expectedSeparator() const { return 4; }
+            };
+
+            class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight { // one byte above the merge threshold: no merge and no rebalance
+                virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
+                virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); } // remember the root's first key
+                virtual void validate() { ASSERT_EQUALS( _oldTop, bt()->keyNode( 0 ).key.toBson() ); } // same top means nothing moved
+                virtual bool merge() const { return false; }
+            protected:
+                BSONObj _oldTop;
+            };
+
+            class MoveBelowLowWaterMarkRight : public NoMoveAtLowWaterMarkRight { // at the threshold with a bigger left sibling: rebalance occurs
+                virtual int rightSize() const { return MergeSizeJustRightRight::rightSize(); }
+                virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
+                // different top means we rebalanced
+                virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+            };
+
+            class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft { // mirror of NoMoveAtLowWaterMarkRight for the left side
+                virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() + 1; }
+                virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); } // remember the root's first key
+                virtual void validate() { ASSERT_EQUALS( _oldTop, bt()->keyNode( 0 ).key.toBson() ); } // same top means nothing moved
+                virtual bool merge() const { return false; }
+            protected:
+                BSONObj _oldTop;
+            };
+
+            class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft { // mirror of MoveBelowLowWaterMarkRight for the left side
+                virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize(); }
+                virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
+                // different top means we rebalanced
+                virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+            };
+
+            class PreferBalanceLeft : public Base { // underfull middle child borrows from its left sibling rather than merging
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$30:null}}", id() );
+                    ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << bigNumString( 0x12 ) );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) ); // one key removed, bucket count unchanged
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{$5:{$1:null,$2:null,$3:null,$4:null},$20:{$6:null,$10:null,$11:null,$13:null,$14:null},_:{$30:null}}", id() ); // keys flowed in from the left
+                }
+            };
+
+            class PreferBalanceRight : public Base { // underfull middle child borrows from its right sibling rather than merging
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$10:{$1:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
+                    ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << bigNumString( 0x12 ) );
+                    // dump();
+                    ASSERT( unindex( k ) );
+                    // dump();
+                    ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) ); // one key removed, bucket count unchanged
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{$10:{$1:null},$31:{$11:null,$13:null,$14:null,$20:null},_:{$32:null,$33:null,$34:null,$35:null,$36:null}}", id() ); // keys flowed in from the right
+                }
+            };
+
+            class RecursiveMergeThenBalance : public Base { // a grandchild merge propagates up, then the parent level rebalances (nrecords 5 -> 4)
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$10:{$5:{$1:null,$2:null},$8:{$6:null,$7:null}},_:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
+                    ASSERT_EQUALS( 15, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << bigNumString( 0x7 ) );
+                    // dump();
+                    ASSERT( unindex( k ) );
+                    // dump();
+                    ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords ); // one bucket reclaimed by the merge
+                    ArtificialTree::checkStructure( "{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},_:{$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
+                }
+            };
+
+            class MergeRightEmpty : public MergeSizeBase { // merge when the right sibling becomes completely empty
+            protected:
+                virtual int rightAdditional() const { return 1; }
+                virtual int leftAdditional() const { return 1; }
+                virtual const char * delKeys() const { return "lz"; } // delete left filler then right filler
+                virtual int rightSize() const { return 0; }
+                virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); } // left nearly full
+            };
+
+            class MergeMinRightEmpty : public MergeSizeBase { // merge when the right sibling is empty and the left holds a single big key
+            protected:
+                virtual int rightAdditional() const { return 1; }
+                virtual int leftAdditional() const { return 0; }
+                virtual const char * delKeys() const { return "z"; }
+                virtual int rightSize() const { return 0; }
+                virtual int leftSize() const { return bigSize() + sizeof( _KeyNode ); }
+            };
+
+            class MergeLeftEmpty : public MergeSizeBase { // mirror of MergeRightEmpty: the left sibling becomes empty
+            protected:
+                virtual int rightAdditional() const { return 1; }
+                virtual int leftAdditional() const { return 1; }
+                virtual const char * delKeys() const { return "zl"; } // delete right filler then left filler
+                virtual int leftSize() const { return 0; }
+                virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); } // right nearly full
+            };
+
+            class MergeMinLeftEmpty : public MergeSizeBase { // mirror of MergeMinRightEmpty: empty left, single big key on the right
+            protected:
+                virtual int leftAdditional() const { return 1; }
+                virtual int rightAdditional() const { return 0; }
+                virtual const char * delKeys() const { return "l"; }
+                virtual int leftSize() const { return 0; }
+                virtual int rightSize() const { return bigSize() + sizeof( _KeyNode ); }
+            };
+
+            class BalanceRightEmpty : public MergeRightEmpty { // left sibling one byte too big to merge: empty right bucket is rebalanced instead
+            protected:
+                virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) + 1; } // +1 pushes the pair over the merge limit
+                virtual bool merge() const { return false; }
+                virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); } // remember the root's first key
+                virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); } // changed top proves a rebalance happened
+            private:
+                BSONObj _oldTop;
+            };
+
+            class BalanceLeftEmpty : public MergeLeftEmpty { // mirror of BalanceRightEmpty: empty left bucket is rebalanced, not merged
+            protected:
+                virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) + 1; } // +1 pushes the pair over the merge limit
+                virtual bool merge() const { return false; }
+                virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); } // remember the root's first key
+                virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); } // changed top proves a rebalance happened
+            private:
+                BSONObj _oldTop;
+            };
+
+            class DelEmptyNoNeighbors : public Base { // emptying an only child with no siblings collapses it into the parent
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{b:{a:null}}", id() );
+                    ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << "a" );
+                    // dump();
+                    ASSERT( unindex( k ) );
+                    // dump();
+                    ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords ); // empty child bucket reclaimed
+                    ArtificialTree::checkStructure( "{b:null}", id() );
+                }
+            };
+
+            class DelEmptyEmptyNeighbors : public Base { // emptying a child whose neighbor slots hold no buckets collapses it into the parent
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
+                    ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << "b" );
+                    // dump();
+                    ASSERT( unindex( k ) );
+                    // dump();
+                    ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
+                    ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords ); // empty child bucket reclaimed
+                    ArtificialTree::checkStructure( "{a:null,c:null,d:null}", id() );
+                }
+            };
+
+            class DelInternal : public Base { // delete an internal key: its child's key is promoted, leaving no unused keys
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
+                    long long unused = 0;
+                    ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << "c" );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused ); // no keys left marked unused
+                    ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() ); // b replaced the deleted internal key c
+                }
+            };
+
+            class DelInternalReplaceWithUnused : public Base { // deleting an internal key whose replacement is itself marked unused
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
+                    getDur().writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).prevChildBucket.btree()->keyNode( 0 ).recordLoc ).GETOFS() ) |= 1; // make unused
+                    long long unused = 0;
+                    ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) ); // b no longer counted as a used key
+                    ASSERT_EQUALS( 1, unused );
+                    ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << "c" );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    unused = 0;
+                    ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 1, unused ); // the promoted replacement stays marked unused
+                    ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+                    // doesn't discriminate between used and unused
+                    ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
+                }
+            };
+
+            class DelInternalReplaceRight : public Base { // deleted internal key is replaced from the right child (the '_' slot)
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null,_:{b:null}}", id() );
+                    long long unused = 0;
+                    ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << "a" );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    unused = 0;
+                    ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused ); // clean replacement: no unused keys
+                    ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{b:null}", id() );
+                }
+            };
+
+            class DelInternalPromoteKey : public Base { // replacement for a deleted internal key is promoted from deep in the left subtree
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null,y:{d:{c:{b:null}},_:{e:null}},z:null}", id() );
+                    long long unused = 0;
+                    ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << "y" );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    unused = 0;
+                    ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused ); // clean promotion: no unused keys
+                    ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords ); // two buckets reclaimed
+                    ArtificialTree::checkStructure( "{a:null,e:{c:{b:null},d:null},z:null}", id() ); // e promoted to replace y
+                }
+            };
+
+            class DelInternalPromoteRightKey : public Base { // replacement for a deleted internal key is promoted from the right subtree
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null,_:{e:{c:null},_:{f:null}}}", id() );
+                    long long unused = 0;
+                    ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << "a" );
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    unused = 0;
+                    ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused ); // clean promotion: no unused keys
+                    ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords ); // two buckets reclaimed
+                    ArtificialTree::checkStructure( "{c:null,_:{e:null,f:null}}", id() );
+                }
+            };
+
+            class DelInternalReplacementPrevNonNull : public Base { // when no replacement can be promoted, the internal key is only marked unused
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null,d:{c:{b:null}},e:null}", id() );
+                    long long unused = 0;
+                    ASSERT_EQUALS( 5, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << "d" );
+                    // dump();
+                    ASSERT( unindex( k ) );
+                    // dump();
+                    ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 1, unused ); // d was marked unused rather than removed
+                    ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords ); // no buckets reclaimed
+                    ArtificialTree::checkStructure( "{a:null,d:{c:{b:null}},e:null}", id() ); // structure unchanged
+                    ASSERT( bt()->keyNode( 1 ).recordLoc.getOfs() & 1 ); // check 'unused' key
+                }
+            };
+
+            class DelInternalReplacementNextNonNull : public Base { // mirror case: key precedes the right-child slot, still only marked unused
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{a:null,_:{c:null,_:{d:null}}}", id() );
+                    long long unused = 0;
+                    ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << "a" );
+                    // dump();
+                    ASSERT( unindex( k ) );
+                    // dump();
+                    ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 1, unused ); // a was marked unused rather than removed
+                    ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords ); // no buckets reclaimed
+                    ArtificialTree::checkStructure( "{a:null,_:{c:null,_:{d:null}}}", id() ); // structure unchanged
+                    ASSERT( bt()->keyNode( 0 ).recordLoc.getOfs() & 1 ); // check 'unused' key
+                }
+            };
+
+            class DelInternalSplitPromoteLeft : public Base { // promoting a replacement overfills the root, splitting it with the promoted key on the left
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$10:null,$20:null,$30$10:{$25:{$23:null},_:{$27:null}},$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100:null}", id() );
+                    long long unused = 0;
+                    ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << bigNumString( 0x30, 0x10 ) ); // the $30$10 internal key
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{$60:{$10:null,$20:null,$27:{$23:null,$25:null},$40:null,$50:null},_:{$70:null,$80:null,$90:null,$100:null}}", id() ); // $27 promoted, root split at $60
+                }
+            };
+
+            class DelInternalSplitPromoteRight : public Base { // mirror of DelInternalSplitPromoteLeft with the promoted key landing on the right
+            public:
+                void run() {
+                    string ns = id().indexNamespace();
+                    ArtificialTree::setTree( "{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100$10:{$95:{$93:null},_:{$97:null}}}", id() );
+                    long long unused = 0;
+                    ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    BSONObj k = BSON( "" << bigNumString( 0x100, 0x10 ) ); // the $100$10 internal key
+//                    dump();
+                    ASSERT( unindex( k ) );
+//                    dump();
+                    ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
+                    ASSERT_EQUALS( 0, unused );
+                    ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+                    ArtificialTree::checkStructure( "{$80:{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{$90:null,$97:{$93:null,$95:null}}}", id() ); // $97 promoted, root split at $80
+                }
+            };
+
+            class SignedZeroDuplication : public Base { // 0.0 and -0.0 compare equal, so a unique index must treat them as duplicates
+            public:
+                void run() {
+                    ASSERT_EQUALS( 0.0, -0.0 ); // sanity: IEEE 754 signed zeros compare equal
+                    DBDirectClient c;
+                    c.ensureIndex( ns(), BSON( "b" << 1 ), true ); // unique index on b
+                    c.insert( ns(), BSON( "b" << 0.0 ) );
+                    c.insert( ns(), BSON( "b" << 1.0 ) );
+                    c.update( ns(), BSON( "b" << 1.0 ), BSON( "b" << -0.0 ) ); // would collide with 0.0 if allowed
+                    ASSERT_EQUALS( 1U, c.count( ns(), BSON( "b" << 0.0 ) ) ); // still exactly one zero entry
+                }
+            };
+
+            class All : public Suite { // registers every btree test in this file under the suite named by testName
+            public:
+                All() : Suite( testName ) {
+                }
+
+                void setupTests() {
+                    add< Create >();
+                    add< SimpleInsertDelete >();
+                    add< SplitRightHeavyBucket >();
+                    add< SplitLeftHeavyBucket >();
+                    add< MissingLocate >();
+                    add< MissingLocateMultiBucket >();
+                    add< SERVER983 >();
+                    add< DontReuseUnused >();
+                    add< PackUnused >();
+                    add< DontDropReferenceKey >();
+                    add< MergeBucketsLeft >();
+                    add< MergeBucketsRight >();
+//                    add< MergeBucketsHead >(); // disabled in source; reason not recorded here
+                    add< MergeBucketsDontReplaceHead >();
+                    add< MergeBucketsDelInternal >();
+                    add< MergeBucketsRightNull >();
+                    add< DontMergeSingleBucket >();
+                    add< ParentMergeNonRightToLeft >();
+                    add< ParentMergeNonRightToRight >();
+                    add< CantMergeRightNoMerge >();
+                    add< CantMergeLeftNoMerge >();
+                    add< MergeOption >();
+                    add< ForceMergeLeft >();
+                    add< ForceMergeRight >();
+                    add< RecursiveMerge >();
+                    add< RecursiveMergeRightBucket >();
+                    add< RecursiveMergeDoubleRightBucket >();
+                    add< MergeSizeJustRightRight >();
+                    add< MergeSizeJustRightLeft >();
+                    add< MergeSizeRight >();
+                    add< MergeSizeLeft >();
+                    add< NoMergeBelowMarkRight >();
+                    add< NoMergeBelowMarkLeft >();
+                    add< MergeSizeRightTooBig >();
+                    add< MergeSizeLeftTooBig >();
+                    add< BalanceOneLeftToRight >();
+                    add< BalanceOneRightToLeft >();
+                    add< BalanceThreeLeftToRight >();
+                    add< BalanceThreeRightToLeft >();
+                    add< BalanceSingleParentKey >();
+                    add< PackEmpty >();
+                    add< PackedDataSizeEmpty >();
+                    add< BalanceSingleParentKeyPackParent >();
+                    add< BalanceSplitParent >();
+                    add< EvenRebalanceLeft >();
+                    add< EvenRebalanceLeftCusp >();
+                    add< EvenRebalanceRight >();
+                    add< EvenRebalanceRightCusp >();
+                    add< EvenRebalanceCenter >();
+                    add< OddRebalanceLeft >();
+                    add< OddRebalanceRight >();
+                    add< OddRebalanceCenter >();
+                    add< RebalanceEmptyRight >();
+                    add< RebalanceEmptyLeft >();
+                    add< NoMoveAtLowWaterMarkRight >();
+                    add< MoveBelowLowWaterMarkRight >();
+                    add< NoMoveAtLowWaterMarkLeft >();
+                    add< MoveBelowLowWaterMarkLeft >();
+                    add< PreferBalanceLeft >();
+                    add< PreferBalanceRight >();
+                    add< RecursiveMergeThenBalance >();
+                    add< MergeRightEmpty >();
+                    add< MergeMinRightEmpty >();
+                    add< MergeLeftEmpty >();
+                    add< MergeMinLeftEmpty >();
+                    add< BalanceRightEmpty >();
+                    add< BalanceLeftEmpty >();
+                    add< DelEmptyNoNeighbors >();
+                    add< DelEmptyEmptyNeighbors >();
+                    add< DelInternal >();
+                    add< DelInternalReplaceWithUnused >();
+                    add< DelInternalReplaceRight >();
+                    add< DelInternalPromoteKey >();
+                    add< DelInternalPromoteRightKey >();
+                    add< DelInternalReplacementPrevNonNull >();
+                    add< DelInternalReplacementNextNonNull >();
+                    add< DelInternalSplitPromoteLeft >();
+                    add< DelInternalSplitPromoteRight >();
+                    add< SignedZeroDuplication >();
+                }
+            } myall;
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
new file mode 100644
index 00000000000..a64894b43c1
--- /dev/null
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// client.cpp
+
+#include "pch.h"
+#include "../client/dbclient.h"
+#include "dbtests.h"
+#include "../db/concurrency.h"
+
+namespace ClientTests {
+
+    class Base { // common fixture: names a collection under "test." and drops it on teardown
+    public:
+
+        Base( string coll ) {
+            _ns = (string)"test." + coll;
+        }
+
+        virtual ~Base() {
+            db.dropCollection( _ns ); // cleanup so each test starts from an empty collection
+        }
+
+        const char * ns() { return _ns.c_str(); }
+
+        string _ns; // full namespace, e.g. "test.dropindex"
+        DBDirectClient db; // in-process client used by all derived tests
+    };
+
+
+    class DropIndex : public Base { // dropIndex/dropIndexes remove secondary indexes but keep the implicit _id index
+    public:
+        DropIndex() : Base( "dropindex" ) {}
+        void run() {
+            db.insert( ns() , BSON( "x" << 2 ) );
+            ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() ); // _id index only
+
+            db.ensureIndex( ns() , BSON( "x" << 1 ) );
+            ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+
+            db.dropIndex( ns() , BSON( "x" << 1 ) );
+            ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() ); // back to _id only
+
+            db.ensureIndex( ns() , BSON( "x" << 1 ) );
+            ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+
+            db.dropIndexes( ns() );
+            ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() ); // dropIndexes also preserves _id
+        }
+    };
+
+    class ReIndex : public Base { // reIndex() preserves the index count (_id plus one secondary)
+    public:
+        ReIndex() : Base( "reindex" ) {}
+        void run() {
+
+            db.insert( ns() , BSON( "x" << 2 ) );
+            ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() ); // _id index only
+
+            db.ensureIndex( ns() , BSON( "x" << 1 ) );
+            ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+
+            db.reIndex( ns() );
+            ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() ); // both indexes survive the rebuild
+        }
+
+    };
+
+    class ReIndex2 : public Base { // same as ReIndex but via the raw "reIndex" command, also checking nIndexes in the reply
+    public:
+        ReIndex2() : Base( "reindex2" ) {}
+        void run() {
+
+            db.insert( ns() , BSON( "x" << 2 ) );
+            ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() ); // _id index only
+
+            db.ensureIndex( ns() , BSON( "x" << 1 ) );
+            ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+
+            BSONObj out;
+            ASSERT( db.runCommand( "test" , BSON( "reIndex" << "reindex2" ) , out ) );
+            ASSERT_EQUALS( 2 , out["nIndexes"].number() ); // command reply reports the rebuilt count
+            ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+        }
+
+    };
+
+    class CS_10 : public Base { // regression test (CS-10): sort over a compound index with large key values returns every document
+    public:
+        CS_10() : Base( "CS_10" ) {}
+        void run() {
+            string longs( 770, 'c' ); // large b value makes each index key big
+            for( int i = 0; i < 1111; ++i )
+                db.insert( ns(), BSON( "a" << i << "b" << longs ) );
+            db.ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ) );
+
+            auto_ptr< DBClientCursor > c = db.query( ns(), Query().sort( BSON( "a" << 1 << "b" << 1 ) ) );
+            ASSERT_EQUALS( 1111, c->itcount() ); // no documents lost or duplicated by the sorted scan
+        }
+    };
+
+    class PushBack : public Base { // DBClientCursor::putBack returns documents to the batch in LIFO order
+    public:
+        PushBack() : Base( "PushBack" ) {}
+        void run() {
+            for( int i = 0; i < 10; ++i )
+                db.insert( ns(), BSON( "i" << i ) );
+            auto_ptr< DBClientCursor > c = db.query( ns(), Query().sort( BSON( "i" << 1 ) ) );
+
+            BSONObj o = c->next();
+            ASSERT( c->more() );
+            ASSERT_EQUALS( 9 , c->objsLeftInBatch() ); // one consumed
+            ASSERT( c->moreInCurrentBatch() );
+
+            c->putBack( o ); // return the consumed doc
+            ASSERT( c->more() );
+            ASSERT_EQUALS( 10, c->objsLeftInBatch() ); // batch restored to full size
+            ASSERT( c->moreInCurrentBatch() );
+
+            o = c->next();
+            BSONObj o2 = c->next();
+            BSONObj o3 = c->next();
+            c->putBack( o3 ); // push back in reverse so iteration order is preserved
+            c->putBack( o2 );
+            c->putBack( o );
+            for( int i = 0; i < 10; ++i ) {
+                o = c->next();
+                ASSERT_EQUALS( i, o[ "i" ].number() ); // original sorted order intact
+            }
+            ASSERT( !c->more() );
+            ASSERT_EQUALS( 0, c->objsLeftInBatch() );
+            ASSERT( !c->moreInCurrentBatch() );
+
+            c->putBack( o ); // putBack also works on an exhausted cursor
+            ASSERT( c->more() );
+            ASSERT_EQUALS( 1, c->objsLeftInBatch() );
+            ASSERT( c->moreInCurrentBatch() );
+            ASSERT_EQUALS( 1, c->itcount() );
+        }
+    };
+
+    class Create : public Base { // createCollection makes a capped collection that collstats can then report on
+    public:
+        Create() : Base( "Create" ) {}
+        void run() {
+            db.createCollection( "unittests.clienttests.create", 4096, true ); // 4KB, capped=true
+            BSONObj info;
+            ASSERT( db.runCommand( "unittests", BSON( "collstats" << "clienttests.create" ), info ) ); // collection exists
+        }
+    };
+
+    class ConnectionStringTests { // parsing a "setName/host,host,host" string as a SET connection
+    public:
+        void run() {
+            {
+                ConnectionString s( "a/b,c,d" , ConnectionString::SET );
+                ASSERT_EQUALS( ConnectionString::SET , s.type() );
+                ASSERT_EQUALS( "a" , s.getSetName() ); // part before '/' is the replica set name
+                vector<HostAndPort> v = s.getServers();
+                ASSERT_EQUALS( 3U , v.size() ); // comma-separated hosts after '/'
+                ASSERT_EQUALS( "b" , v[0].host() );
+                ASSERT_EQUALS( "c" , v[1].host() );
+                ASSERT_EQUALS( "d" , v[2].host() );
+            }
+        }
+    };
+
+    class All : public Suite { // registers every test in this file under the "client" suite
+    public:
+        All() : Suite( "client" ) {
+        }
+
+        void setupTests() {
+            add<DropIndex>();
+            add<ReIndex>();
+            add<ReIndex2>();
+            add<CS_10>();
+            add<PushBack>();
+            add<Create>();
+            add<ConnectionStringTests>();
+        }
+
+    } all;
+}
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
new file mode 100644
index 00000000000..fa6204d25fd
--- /dev/null
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -0,0 +1,98 @@
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../client/dbclient.h"
+#include "dbtests.h"
+#include "../db/concurrency.h"
+
+using namespace mongo;
+
+namespace CommandTests {
+ // one namespace per command
+ namespace FileMD5 {
+ struct Base {
+ Base() {
+ db.dropCollection(ns());
+ db.ensureIndex(ns(), BSON( "files_id" << 1 << "n" << 1 ));
+ }
+
+ const char* ns() { return "test.fs.chunks"; }
+
+ DBDirectClient db;
+ };
+ struct Type0 : Base {
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 0);
+ b.appendBinData("data", 6, BinDataGeneral, "hello ");
+ db.insert(ns(), b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 1);
+ b.appendBinData("data", 5, BinDataGeneral, "world");
+ db.insert(ns(), b.obj());
+ }
+
+ BSONObj result;
+ ASSERT( db.runCommand("test", BSON("filemd5" << 0), result) );
+ ASSERT_EQUALS( string("5eb63bbbe01eeed093cb22bb8f5acdc3") , result["md5"].valuestr() );
+ }
+ };
+ struct Type2 : Base {
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 0);
+ b.appendBinDataArrayDeprecated("data", "hello ", 6);
+ db.insert(ns(), b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 1);
+ b.appendBinDataArrayDeprecated("data", "world", 5);
+ db.insert(ns(), b.obj());
+ }
+
+ BSONObj result;
+ ASSERT( db.runCommand("test", BSON("filemd5" << 0), result) );
+ ASSERT_EQUALS( string("5eb63bbbe01eeed093cb22bb8f5acdc3") , result["md5"].valuestr() );
+ }
+ };
+ }
+
+ class All : public Suite {
+ public:
+ All() : Suite( "commands" ) {
+ }
+
+ void setupTests() {
+ add< FileMD5::Type0 >();
+ add< FileMD5::Type2 >();
+ }
+
+ } all;
+}
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
new file mode 100644
index 00000000000..0d2575f14e3
--- /dev/null
+++ b/src/mongo/dbtests/counttests.cpp
@@ -0,0 +1,142 @@
+// counttests.cpp : count.{h,cpp} unit tests.
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../db/ops/count.h"
+
+#include "../db/cursor.h"
+#include "../db/pdfile.h"
+
+#include "dbtests.h"
+
+namespace CountTests {
+
+ class Base {
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base() : _context( ns() ) {
+ addIndex( fromjson( "{\"a\":1}" ) );
+ }
+ ~Base() {
+ try {
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() );
+ vector< DiskLoc > toDelete;
+ for(; c->ok(); c->advance() )
+ toDelete.push_back( c->currLoc() );
+ for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
+ theDataFileMgr.deleteRecord( ns(), i->rec(), *i, false );
+ DBDirectClient cl;
+ cl.dropIndexes( ns() );
+ }
+ catch ( ... ) {
+ FAIL( "Exception while cleaning up collection" );
+ }
+ }
+ protected:
+ static const char *ns() {
+ return "unittests.counttests";
+ }
+ static void addIndex( const BSONObj &key ) {
+ BSONObjBuilder b;
+ b.append( "name", key.firstElementFieldName() );
+ b.append( "ns", ns() );
+ b.append( "key", key );
+ BSONObj o = b.done();
+ stringstream indexNs;
+ indexNs << "unittests.system.indexes";
+ theDataFileMgr.insert( indexNs.str().c_str(), o.objdata(), o.objsize() );
+ }
+ static void insert( const char *s ) {
+ insert( fromjson( s ) );
+ }
+ static void insert( const BSONObj &o ) {
+ theDataFileMgr.insert( ns(), o.objdata(), o.objsize() );
+ }
+ };
+
+ class CountBasic : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{}}" );
+ string err;
+ ASSERT_EQUALS( 1, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class CountQuery : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ insert( "{\"a\":\"b\",\"x\":\"y\"}" );
+ insert( "{\"a\":\"c\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{\"a\":\"b\"}}" );
+ string err;
+ ASSERT_EQUALS( 2, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class CountFields : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ insert( "{\"c\":\"d\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{},\"fields\":{\"a\":1}}" );
+ string err;
+ ASSERT_EQUALS( 2, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class CountQueryFields : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ insert( "{\"a\":\"c\"}" );
+ insert( "{\"d\":\"e\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{\"a\":\"b\"},\"fields\":{\"a\":1}}" );
+ string err;
+ ASSERT_EQUALS( 1, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class CountIndexedRegex : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ insert( "{\"a\":\"c\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{\"a\":/^b/}}" );
+ string err;
+ ASSERT_EQUALS( 1, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "count" ) {
+ }
+
+ void setupTests() {
+ add< CountBasic >();
+ add< CountQuery >();
+ add< CountFields >();
+ add< CountQueryFields >();
+ add< CountIndexedRegex >();
+ }
+ } myall;
+
+} // namespace CountTests
diff --git a/src/mongo/dbtests/cursortests.cpp b/src/mongo/dbtests/cursortests.cpp
new file mode 100644
index 00000000000..a7b52aada12
--- /dev/null
+++ b/src/mongo/dbtests/cursortests.cpp
@@ -0,0 +1,305 @@
+// cursortests.cpp // cursor related unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/clientcursor.h"
+#include "../db/instance.h"
+#include "../db/btree.h"
+#include "../db/queryutil.h"
+#include "dbtests.h"
+
+namespace CursorTests {
+
+ namespace BtreeCursorTests {
+
+ // The ranges expressed in these tests are impossible given our query
+ // syntax, so going to do them a hacky way.
+
+ class Base {
+ protected:
+ static const char *ns() { return "unittests.cursortests.Base"; }
+ FieldRangeVector *vec( int *vals, int len, int direction = 1 ) {
+ FieldRangeSet s( "", BSON( "a" << 1 ), true );
+ for( int i = 0; i < len; i += 2 ) {
+ _objs.push_back( BSON( "a" << BSON( "$gte" << vals[ i ] << "$lte" << vals[ i + 1 ] ) ) );
+ FieldRangeSet s2( "", _objs.back(), true );
+ if ( i == 0 ) {
+ s.range( "a" ) = s2.range( "a" );
+ }
+ else {
+ s.range( "a" ) |= s2.range( "a" );
+ }
+ }
+ // orphan idxSpec for this test
+ IndexSpec *idxSpec = new IndexSpec( BSON( "a" << 1 ) );
+ return new FieldRangeVector( s, *idxSpec, direction );
+ }
+ DBDirectClient _c;
+ private:
+ vector< BSONObj > _objs;
+ };
+
+ class MultiRange : public Base {
+ public:
+ void run() {
+ dblock lk;
+ const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRange";
+ {
+ DBDirectClient c;
+ for( int i = 0; i < 10; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
+ }
+ int v[] = { 1, 2, 4, 6 };
+ boost::shared_ptr< FieldRangeVector > frv( vec( v, 4 ) );
+ Client::Context ctx( ns );
+ scoped_ptr<BtreeCursor> _c( BtreeCursor::make( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, 1 ) );
+ BtreeCursor &c = *_c.get();
+ ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
+ double expected[] = { 1, 2, 4, 5, 6 };
+ for( int i = 0; i < 5; ++i ) {
+ ASSERT( c.ok() );
+ ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
+ c.advance();
+ }
+ ASSERT( !c.ok() );
+ }
+ };
+
+ class MultiRangeGap : public Base {
+ public:
+ void run() {
+ dblock lk;
+ const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeGap";
+ {
+ DBDirectClient c;
+ for( int i = 0; i < 10; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ for( int i = 100; i < 110; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
+ }
+ int v[] = { -50, 2, 40, 60, 109, 200 };
+ boost::shared_ptr< FieldRangeVector > frv( vec( v, 6 ) );
+ Client::Context ctx( ns );
+ scoped_ptr<BtreeCursor> _c( BtreeCursor::make(nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, 1 ) );
+ BtreeCursor &c = *_c.get();
+ ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
+ double expected[] = { 0, 1, 2, 109 };
+ for( int i = 0; i < 4; ++i ) {
+ ASSERT( c.ok() );
+ ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
+ c.advance();
+ }
+ ASSERT( !c.ok() );
+ }
+ };
+
+ class MultiRangeReverse : public Base {
+ public:
+ void run() {
+ dblock lk;
+ const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeReverse";
+ {
+ DBDirectClient c;
+ for( int i = 0; i < 10; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
+ }
+ int v[] = { 1, 2, 4, 6 };
+ boost::shared_ptr< FieldRangeVector > frv( vec( v, 4, -1 ) );
+ Client::Context ctx( ns );
+ scoped_ptr<BtreeCursor> _c( BtreeCursor::make( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, -1 ) );
+ BtreeCursor& c = *_c.get();
+ ASSERT_EQUALS( "BtreeCursor a_1 reverse multi", c.toString() );
+ double expected[] = { 6, 5, 4, 2, 1 };
+ for( int i = 0; i < 5; ++i ) {
+ ASSERT( c.ok() );
+ ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
+ c.advance();
+ }
+ ASSERT( !c.ok() );
+ }
+ };
+
+ class Base2 {
+ public:
+ virtual ~Base2() { _c.dropCollection( ns() ); }
+ protected:
+ static const char *ns() { return "unittests.cursortests.Base2"; }
+ DBDirectClient _c;
+ virtual BSONObj idx() const = 0;
+ virtual int direction() const { return 1; }
+ void insert( const BSONObj &o ) {
+ _objs.push_back( o );
+ _c.insert( ns(), o );
+ }
+ void check( const BSONObj &spec ) {
+ {
+ BSONObj keypat = idx();
+ //cout << keypat.toString() << endl;
+ _c.ensureIndex( ns(), idx() );
+ }
+
+ Client::Context ctx( ns() );
+ FieldRangeSet frs( ns(), spec, true );
+ // orphan spec for this test.
+ IndexSpec *idxSpec = new IndexSpec( idx() );
+ boost::shared_ptr< FieldRangeVector > frv( new FieldRangeVector( frs, *idxSpec, direction() ) );
+ scoped_ptr<BtreeCursor> c( BtreeCursor::make( nsdetails( ns() ), 1, nsdetails( ns() )->idx( 1 ), frv, direction() ) );
+ Matcher m( spec );
+ int count = 0;
+ while( c->ok() ) {
+ ASSERT( m.matches( c->current() ) );
+ c->advance();
+ ++count;
+ }
+ int expectedCount = 0;
+ for( vector< BSONObj >::const_iterator i = _objs.begin(); i != _objs.end(); ++i ) {
+ if ( m.matches( *i ) ) {
+ ++expectedCount;
+ }
+ }
+ ASSERT_EQUALS( expectedCount, count );
+ }
+ private:
+ dblock _lk;
+ vector< BSONObj > _objs;
+ };
+
+ class EqEq : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 4 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 4 ) );
+ insert( BSON( "a" << 5 << "b" << 4 ) );
+ check( BSON( "a" << 4 << "b" << 5 ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
+ class EqRange : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 3 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 0 ) );
+ insert( BSON( "a" << 4 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 6 ) );
+ insert( BSON( "a" << 4 << "b" << 6 ) );
+ insert( BSON( "a" << 4 << "b" << 10 ) );
+ insert( BSON( "a" << 4 << "b" << 11 ) );
+ insert( BSON( "a" << 5 << "b" << 5 ) );
+ check( BSON( "a" << 4 << "b" << BSON( "$gte" << 1 << "$lte" << 10 ) ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
+ class EqIn : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 3 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 0 ) );
+ insert( BSON( "a" << 4 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 6 ) );
+ insert( BSON( "a" << 4 << "b" << 6 ) );
+ insert( BSON( "a" << 4 << "b" << 10 ) );
+ insert( BSON( "a" << 4 << "b" << 11 ) );
+ insert( BSON( "a" << 5 << "b" << 5 ) );
+ check( BSON( "a" << 4 << "b" << BSON( "$in" << BSON_ARRAY( 5 << 6 << 11 ) ) ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
+ class RangeEq : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 0 << "b" << 4 ) );
+ insert( BSON( "a" << 1 << "b" << 4 ) );
+ insert( BSON( "a" << 4 << "b" << 3 ) );
+ insert( BSON( "a" << 5 << "b" << 4 ) );
+ insert( BSON( "a" << 7 << "b" << 4 ) );
+ insert( BSON( "a" << 4 << "b" << 4 ) );
+ insert( BSON( "a" << 9 << "b" << 6 ) );
+ insert( BSON( "a" << 11 << "b" << 1 ) );
+ insert( BSON( "a" << 11 << "b" << 4 ) );
+ check( BSON( "a" << BSON( "$gte" << 1 << "$lte" << 10 ) << "b" << 4 ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
+ class RangeIn : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 0 << "b" << 4 ) );
+ insert( BSON( "a" << 1 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 3 ) );
+ insert( BSON( "a" << 5 << "b" << 4 ) );
+ insert( BSON( "a" << 7 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 4 ) );
+ insert( BSON( "a" << 9 << "b" << 6 ) );
+ insert( BSON( "a" << 11 << "b" << 1 ) );
+ insert( BSON( "a" << 11 << "b" << 4 ) );
+ check( BSON( "a" << BSON( "$gte" << 1 << "$lte" << 10 ) << "b" << BSON( "$in" << BSON_ARRAY( 4 << 6 ) ) ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
+ class AbortImplicitScan : public Base {
+ public:
+ void run() {
+ dblock lk;
+ IndexSpec idx( BSON( "a" << 1 << "b" << 1 ) );
+ _c.ensureIndex( ns(), idx.keyPattern );
+ for( int i = 0; i < 300; ++i ) {
+ _c.insert( ns(), BSON( "a" << i << "b" << 5 ) );
+ }
+ FieldRangeSet frs( ns(), BSON( "b" << 3 ), true );
+ boost::shared_ptr<FieldRangeVector> frv( new FieldRangeVector( frs, idx, 1 ) );
+ Client::Context ctx( ns() );
+ scoped_ptr<BtreeCursor> c( BtreeCursor::make( nsdetails( ns() ), 1, nsdetails( ns() )->idx(1), frv, 1 ) );
+ long long initialNscanned = c->nscanned();
+ ASSERT( initialNscanned < 200 );
+ ASSERT( c->ok() );
+ c->advance();
+ ASSERT( c->nscanned() > initialNscanned );
+ ASSERT( c->nscanned() < 200 );
+ ASSERT( c->ok() );
+ }
+ };
+
+ } // namespace BtreeCursorTests
+
+ class All : public Suite {
+ public:
+ All() : Suite( "cursor" ) {}
+
+ void setupTests() {
+ add< BtreeCursorTests::MultiRange >();
+ add< BtreeCursorTests::MultiRangeGap >();
+ add< BtreeCursorTests::MultiRangeReverse >();
+ add< BtreeCursorTests::EqEq >();
+ add< BtreeCursorTests::EqRange >();
+ add< BtreeCursorTests::EqIn >();
+ add< BtreeCursorTests::RangeEq >();
+ add< BtreeCursorTests::RangeIn >();
+ add< BtreeCursorTests::AbortImplicitScan >();
+ }
+ } myall;
+} // namespace CursorTests
diff --git a/src/mongo/dbtests/d_chunk_manager_tests.cpp b/src/mongo/dbtests/d_chunk_manager_tests.cpp
new file mode 100644
index 00000000000..2bcc90faf7a
--- /dev/null
+++ b/src/mongo/dbtests/d_chunk_manager_tests.cpp
@@ -0,0 +1,467 @@
+//@file d_chunk_manager_tests.cpp : s/d_chunk_manager.{h,cpp} tests
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "dbtests.h"
+
+#include "../s/d_chunk_manager.h"
+
+namespace {
+
+ class BasicTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+
+ // single-chunk collection
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << MINKEY ) <<
+ "max" << BSON( "a" << MAXKEY ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj k1 = BSON( "a" << MINKEY );
+ ASSERT( s.belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << MAXKEY );
+ ASSERT( ! s.belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << 1 << "b" << 2 );
+ ASSERT( s.belongsToMe( k3 ) );
+ }
+ };
+
+ class BasicCompoundTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1) <<
+ "unique" << false );
+
+ // single-chunk collection
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKeyb_MinKey" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << MINKEY << "b" << MINKEY ) <<
+ "max" << BSON( "a" << MAXKEY << "b" << MAXKEY ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj k1 = BSON( "a" << MINKEY << "b" << MINKEY );
+ ASSERT( s.belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << MAXKEY << "b" << MAXKEY );
+ ASSERT( ! s.belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << MINKEY << "b" << 10 );
+ ASSERT( s.belongsToMe( k3 ) );
+ BSONObj k4 = BSON( "a" << 10 << "b" << 20 );
+ ASSERT( s.belongsToMe( k4 ) );
+ }
+ };
+
+ class RangeTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "x.y" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+
+ // 3-chunk collection, 2 of them being contiguous
+ // [min->10) , [10->20) , <gap> , [30->max)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_MinKey" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << MINKEY ) <<
+ "max" << BSON( "a" << 10 ) ) <<
+ BSON( "_id" << "x.y-a_10" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 10 ) <<
+ "max" << BSON( "a" << 20 ) ) <<
+ BSON( "_id" << "x.y-a_30" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 30 ) <<
+ "max" << BSON( "a" << MAXKEY ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj k1 = BSON( "a" << 5 );
+ ASSERT( s.belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << 10 );
+ ASSERT( s.belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << 25 );
+ ASSERT( ! s.belongsToMe( k3 ) );
+ BSONObj k4 = BSON( "a" << 30 );
+ ASSERT( s.belongsToMe( k4 ) );
+ BSONObj k5 = BSON( "a" << 40 );
+ ASSERT( s.belongsToMe( k5 ) );
+ }
+ };
+
+ class GetNextTests {
+ public:
+ void run() {
+
+ BSONObj collection = BSON( "_id" << "x.y" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+ // empty collection
+ BSONArray chunks1 = BSONArray();
+ ShardChunkManager s1( collection , chunks1 );
+
+ BSONObj empty;
+ BSONObj arbitraryKey = BSON( "a" << 10 );
+ BSONObj foundMin, foundMax;
+
+ ASSERT( s1.getNextChunk( empty , &foundMin , &foundMax ) );
+ ASSERT( foundMin.isEmpty() );
+ ASSERT( foundMax.isEmpty() );
+ ASSERT( s1.getNextChunk( arbitraryKey , &foundMin , &foundMax ) );
+ ASSERT( foundMin.isEmpty() );
+ ASSERT( foundMax.isEmpty() );
+
+ // single-chunk collection
+ // [10->20]
+ BSONObj key_a10 = BSON( "a" << 10 );
+ BSONObj key_a20 = BSON( "a" << 20 );
+ BSONArray chunks2 = BSON_ARRAY( BSON( "_id" << "x.y-a_10" <<
+ "ns" << "x.y" <<
+ "min" << key_a10 <<
+ "max" << key_a20 ) );
+ ShardChunkManager s2( collection , chunks2 );
+ ASSERT( s2.getNextChunk( empty , &foundMin , &foundMax ) );
+ ASSERT( foundMin.woCompare( key_a10 ) == 0 );
+ ASSERT( foundMax.woCompare( key_a20 ) == 0 );
+
+ // 3-chunk collection, 2 of them being contiguous
+ // [min->10) , [10->20) , <gap> , [30->max)
+ BSONObj key_a30 = BSON( "a" << 30 );
+ BSONObj key_min = BSON( "a" << MINKEY );
+ BSONObj key_max = BSON( "a" << MAXKEY );
+ BSONArray chunks3 = BSON_ARRAY( BSON( "_id" << "x.y-a_MinKey" <<
+ "ns" << "x.y" <<
+ "min" << key_min <<
+ "max" << key_a10 ) <<
+ BSON( "_id" << "x.y-a_10" <<
+ "ns" << "x.y" <<
+ "min" << key_a10 <<
+ "max" << key_a20 ) <<
+ BSON( "_id" << "x.y-a_30" <<
+ "ns" << "x.y" <<
+ "min" << key_a30 <<
+ "max" << key_max ) );
+ ShardChunkManager s3( collection , chunks3 );
+ ASSERT( ! s3.getNextChunk( empty , &foundMin , &foundMax ) ); // not eof
+ ASSERT( foundMin.woCompare( key_min ) == 0 );
+ ASSERT( foundMax.woCompare( key_a10 ) == 0 );
+ ASSERT( ! s3.getNextChunk( key_a10 , &foundMin , &foundMax ) );
+ ASSERT( foundMin.woCompare( key_a30 ) == 0 );
+ ASSERT( foundMax.woCompare( key_max ) == 0 );
+ ASSERT( s3.getNextChunk( key_a30 , &foundMin , &foundMax ) );
+ }
+ };
+
+ class DeletedTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << "true" );
+
+ BSONArray chunks = BSONArray();
+
+ ASSERT_THROWS( ShardChunkManager s ( collection , chunks ) , UserException );
+ }
+ };
+
+ class ClonePlusTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+ // 1-chunk collection
+ // [10,0-20,0)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ "max" << BSON( "a" << 20 << "b" << 0 ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ // new chunk [20,0-30,0)
+ BSONObj min = BSON( "a" << 20 << "b" << 0 );
+ BSONObj max = BSON( "a" << 30 << "b" << 0 );
+ ShardChunkManagerPtr cloned( s.clonePlus( min , max , 1 /* TODO test version */ ) );
+
+ BSONObj k1 = BSON( "a" << 5 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << 20 << "b" << 0 );
+ ASSERT( cloned->belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << 25 << "b" << 0 );
+ ASSERT( cloned->belongsToMe( k3 ) );
+ BSONObj k4 = BSON( "a" << 30 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k4 ) );
+ }
+ };
+
+ class ClonePlusExceptionTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+ // 1-chunk collection
+ // [10,0-20,0)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ "max" << BSON( "a" << 20 << "b" << 0 ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ // [15,0-25,0) overlaps [10,0-20,0)
+ BSONObj min = BSON( "a" << 15 << "b" << 0 );
+ BSONObj max = BSON( "a" << 25 << "b" << 0 );
+ ASSERT_THROWS( s.clonePlus ( min , max , 1 /* TODO test version */ ) , UserException );
+ }
+ };
+
+ class CloneMinusTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "x.y" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+
+ // 2-chunk collection
+ // [10,0->20,0) , <gap> , [30,0->40,0)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_10b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ "max" << BSON( "a" << 20 << "b" << 0 ) ) <<
+ BSON( "_id" << "x.y-a_30b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 30 << "b" << 0 ) <<
+ "max" << BSON( "a" << 40 << "b" << 0 ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ // deleting chunk [10,0-20,0)
+ BSONObj min = BSON( "a" << 10 << "b" << 0 );
+ BSONObj max = BSON( "a" << 20 << "b" << 0 );
+ ShardChunkManagerPtr cloned( s.cloneMinus( min , max , 1 /* TODO test version */ ) );
+
+ BSONObj k1 = BSON( "a" << 5 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << 15 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << 30 << "b" << 0 );
+ ASSERT( cloned->belongsToMe( k3 ) );
+ BSONObj k4 = BSON( "a" << 35 << "b" << 0 );
+ ASSERT( cloned->belongsToMe( k4 ) );
+ BSONObj k5 = BSON( "a" << 40 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k5 ) );
+ }
+ };
+
+ class CloneMinusExceptionTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "x.y" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+
+ // 2-chunk collection
+ // [10,0->20,0) , <gap> , [30,0->40,0)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_10b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ "max" << BSON( "a" << 20 << "b" << 0 ) ) <<
+ BSON( "_id" << "x.y-a_30b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 30 << "b" << 0 ) <<
+ "max" << BSON( "a" << 40 << "b" << 0 ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ // deleting non-existing chunk [25,0-28,0)
+ BSONObj min1 = BSON( "a" << 25 << "b" << 0 );
+ BSONObj max1 = BSON( "a" << 28 << "b" << 0 );
+ ASSERT_THROWS( s.cloneMinus( min1 , max1 , 1 /* TODO test version */ ) , UserException );
+
+
+            // deleting an overlapping range (not exactly a chunk) [15,0-25,0)
+ BSONObj min2 = BSON( "a" << 15 << "b" << 0 );
+ BSONObj max2 = BSON( "a" << 25 << "b" << 0 );
+ ASSERT_THROWS( s.cloneMinus( min2 , max2 , 1 /* TODO test version */ ) , UserException );
+ }
+ };
+
+ class CloneSplitTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+ // 1-chunk collection
+ // [10,0-20,0)
+ BSONObj min = BSON( "a" << 10 << "b" << 0 );
+ BSONObj max = BSON( "a" << 20 << "b" << 0 );
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey"
+ << "ns" << "test.foo"
+ << "min" << min
+ << "max" << max ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj split1 = BSON( "a" << 15 << "b" << 0 );
+ BSONObj split2 = BSON( "a" << 18 << "b" << 0 );
+ vector<BSONObj> splitKeys;
+ splitKeys.push_back( split1 );
+ splitKeys.push_back( split2 );
+ ShardChunkVersion version( 1 , 99 ); // first chunk 1|99 , second 1|100
+ ShardChunkManagerPtr cloned( s.cloneSplit( min , max , splitKeys , version ) );
+
+ version.incMinor(); /* second chunk 1|100, first split point */
+ version.incMinor(); /* third chunk 1|101, second split point */
+ ASSERT_EQUALS( cloned->getVersion() , version /* 1|101 */ );
+ ASSERT_EQUALS( s.getNumChunks() , 1u );
+ ASSERT_EQUALS( cloned->getNumChunks() , 3u );
+ ASSERT( cloned->belongsToMe( min ) );
+ ASSERT( cloned->belongsToMe( split1 ) );
+ ASSERT( cloned->belongsToMe( split2 ) );
+ ASSERT( ! cloned->belongsToMe( max ) );
+ }
+ };
+
+ class CloneSplitExceptionTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+ // 1-chunk collection
+ // [10,0-20,0)
+ BSONObj min = BSON( "a" << 10 << "b" << 0 );
+ BSONObj max = BSON( "a" << 20 << "b" << 0 );
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey"
+ << "ns" << "test.foo"
+ << "min" << min
+ << "max" << max ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj badSplit = BSON( "a" << 5 << "b" << 0 );
+ vector<BSONObj> splitKeys;
+ splitKeys.push_back( badSplit );
+ ASSERT_THROWS( s.cloneSplit( min , max , splitKeys , ShardChunkVersion( 1 ) ) , UserException );
+
+ BSONObj badMax = BSON( "a" << 25 << "b" << 0 );
+ BSONObj split = BSON( "a" << 15 << "b" << 0 );
+ splitKeys.clear();
+ splitKeys.push_back( split );
+ ASSERT_THROWS( s.cloneSplit( min , badMax, splitKeys , ShardChunkVersion( 1 ) ) , UserException );
+ }
+ };
+
+ class EmptyShardTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+
+ // no chunks on this shard
+ BSONArray chunks;
+
+ // shard can have zero chunks for an existing collection
+ // version should be 0, though
+ ShardChunkManager s( collection , chunks );
+ ASSERT_EQUALS( s.getVersion() , ShardChunkVersion( 0 ) );
+ ASSERT_EQUALS( s.getNumChunks() , 0u );
+ }
+ };
+
+ class LastChunkTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+
+ // 1-chunk collection
+ // [10->20)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_10" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << 10 ) <<
+ "max" << BSON( "a" << 20 ) ) );
+
+ ShardChunkManager s( collection , chunks );
+ BSONObj min = BSON( "a" << 10 );
+ BSONObj max = BSON( "a" << 20 );
+
+ // if we remove the only chunk, the only version accepted is 0
+ ShardChunkVersion nonZero = 99;
+ ASSERT_THROWS( s.cloneMinus( min , max , nonZero ) , UserException );
+ ShardChunkManagerPtr empty( s.cloneMinus( min , max , 0 ) );
+ ASSERT_EQUALS( empty->getVersion() , ShardChunkVersion( 0 ) );
+ ASSERT_EQUALS( empty->getNumChunks() , 0u );
+ BSONObj k = BSON( "a" << 15 << "b" << 0 );
+ ASSERT( ! empty->belongsToMe( k ) );
+
+ // we can add a chunk to an empty manager
+ // version should be provided
+ ASSERT_THROWS( empty->clonePlus( min , max , 0 ) , UserException );
+ ShardChunkManagerPtr cloned( empty->clonePlus( min , max , nonZero ) );
+ ASSERT_EQUALS( cloned->getVersion(), nonZero );
+ ASSERT_EQUALS( cloned->getNumChunks() , 1u );
+ ASSERT( cloned->belongsToMe( k ) );
+ }
+ };
+
+ class ShardChunkManagerSuite : public Suite {
+ public:
+ ShardChunkManagerSuite() : Suite ( "shard_chunk_manager" ) {}
+
+ void setupTests() {
+ add< BasicTests >();
+ add< BasicCompoundTests >();
+ add< RangeTests >();
+ add< GetNextTests >();
+ add< DeletedTests >();
+ add< ClonePlusTests >();
+ add< ClonePlusExceptionTests >();
+ add< CloneMinusTests >();
+ add< CloneMinusExceptionTests >();
+ add< CloneSplitTests >();
+ add< CloneSplitExceptionTests >();
+ add< EmptyShardTests >();
+ add< LastChunkTests >();
+ }
+ } shardChunkManagerSuite;
+
+} // anonymous namespace
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
new file mode 100644
index 00000000000..fde0f669c98
--- /dev/null
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -0,0 +1,29 @@
+// @file dbtests.cpp : Runs db unit tests.
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "dbtests.h"
+#include "../util/unittest.h"
+
+int main( int argc, char** argv ) {
+ static StaticObserver StaticObserver;
+ doPreServerStartupInits();
+ UnitTest::runTests();
+ return Suite::run(argc, argv, "/tmp/unittest");
+}
diff --git a/src/mongo/dbtests/dbtests.h b/src/mongo/dbtests/dbtests.h
new file mode 100644
index 00000000000..dbaeea1d180
--- /dev/null
+++ b/src/mongo/dbtests/dbtests.h
@@ -0,0 +1,25 @@
+// dbtests.h : Test suite generator headers.
+//
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "framework.h"
+
+using namespace mongo;
+using namespace mongo::regression;
+using boost::shared_ptr;
+
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
new file mode 100644
index 00000000000..860eb7e7e5c
--- /dev/null
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -0,0 +1,103 @@
+/** @file directclienttests.cpp
+*/
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/ops/query.h"
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+#include "../db/lasterror.h"
+#include "../db/ops/update.h"
+#include "../util/timer.h"
+#include "dbtests.h"
+
+namespace DirectClientTests {
+
+ class ClientBase {
+ public:
+ // NOTE: Not bothering to backup the old error record.
+ ClientBase() { mongo::lastError.reset( new LastError() ); }
+ virtual ~ClientBase() { }
+ protected:
+ static bool error() {
+ return !_client.getPrevError().getField( "err" ).isNull();
+ }
+ DBDirectClient &client() const { return _client; }
+ private:
+ static DBDirectClient _client;
+ };
+ DBDirectClient ClientBase::_client;
+
+ const char *ns = "a.b";
+
+ class Capped : public ClientBase {
+ public:
+ virtual void run() {
+ for( int pass=0; pass < 3; pass++ ) {
+ client().createCollection(ns, 1024 * 1024, true, 999);
+ for( int j =0; j < pass*3; j++ )
+ client().insert(ns, BSON("x" << j));
+
+ // test truncation of a capped collection
+ if( pass ) {
+ BSONObj info;
+ BSONObj cmd = BSON( "captrunc" << "b" << "n" << 1 << "inc" << true );
+ //cout << cmd.toString() << endl;
+ bool ok = client().runCommand("a", cmd, info);
+ //cout << info.toString() << endl;
+ assert(ok);
+ }
+
+ assert( client().dropCollection(ns) );
+ }
+ }
+ };
+
+ class InsertMany : ClientBase {
+ public:
+ virtual void run(){
+ vector<BSONObj> objs;
+ objs.push_back(BSON("_id" << 1));
+ objs.push_back(BSON("_id" << 1));
+ objs.push_back(BSON("_id" << 2));
+
+
+ client().dropCollection(ns);
+ client().insert(ns, objs);
+ ASSERT_EQUALS(client().getLastErrorDetailed()["code"].numberInt(), 11000);
+ ASSERT_EQUALS((int)client().count(ns), 1);
+
+ client().dropCollection(ns);
+ client().insert(ns, objs, InsertOption_ContinueOnError);
+ ASSERT_EQUALS(client().getLastErrorDetailed()["code"].numberInt(), 11000);
+ ASSERT_EQUALS((int)client().count(ns), 2);
+ }
+
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "directclient" ) {
+ }
+ void setupTests() {
+ add< Capped >();
+ add< InsertMany >();
+ }
+ } myall;
+}
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
new file mode 100644
index 00000000000..95ed8b33668
--- /dev/null
+++ b/src/mongo/dbtests/framework.cpp
@@ -0,0 +1,446 @@
+// framework.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../util/version.h"
+#include <boost/program_options.hpp>
+
+#undef assert
+#define assert MONGO_assert
+
+#include "framework.h"
+#include "../util/file_allocator.h"
+#include "../db/dur.h"
+#include "../util/background.h"
+
+#ifndef _WIN32
+#include <cxxabi.h>
+#include <sys/file.h>
+#endif
+
+namespace po = boost::program_options;
+
+namespace mongo {
+
+ CmdLine cmdLine;
+
+ namespace regression {
+
+ map<string,Suite*> * mongo::regression::Suite::_suites = 0;
+
+ class Result {
+ public:
+ Result( string name ) : _name( name ) , _rc(0) , _tests(0) , _fails(0) , _asserts(0) {
+ }
+
+ string toString() {
+ stringstream ss;
+
+ char result[128];
+ sprintf(result, "%-20s | tests: %4d | fails: %4d | assert calls: %6d\n", _name.c_str(), _tests, _fails, _asserts);
+ ss << result;
+
+ for ( list<string>::iterator i=_messages.begin(); i!=_messages.end(); i++ ) {
+ ss << "\t" << *i << '\n';
+ }
+
+ return ss.str();
+ }
+
+ int rc() {
+ return _rc;
+ }
+
+ string _name;
+
+ int _rc;
+ int _tests;
+ int _fails;
+ int _asserts;
+ list<string> _messages;
+
+ static Result * cur;
+ };
+
+ Result * Result::cur = 0;
+
+ int minutesRunning = 0; // reset to 0 each time a new test starts
+ mutex minutesRunningMutex("minutesRunningMutex");
+ string currentTestName;
+
+ Result * Suite::run( const string& filter ) {
+ // set tlogLevel to -1 to suppress tlog() output in a test program
+ tlogLevel = -1;
+
+ log(1) << "\t about to setupTests" << endl;
+ setupTests();
+ log(1) << "\t done setupTests" << endl;
+
+ Result * r = new Result( _name );
+ Result::cur = r;
+
+ /* see note in SavedContext */
+ //writelock lk("");
+
+ for ( list<TestCase*>::iterator i=_tests.begin(); i!=_tests.end(); i++ ) {
+ TestCase * tc = *i;
+ if ( filter.size() && tc->getName().find( filter ) == string::npos ) {
+ log(1) << "\t skipping test: " << tc->getName() << " because doesn't match filter" << endl;
+ continue;
+ }
+
+ r->_tests++;
+
+ bool passes = false;
+
+ log(1) << "\t going to run test: " << tc->getName() << endl;
+
+ stringstream err;
+ err << tc->getName() << "\t";
+
+ {
+ scoped_lock lk(minutesRunningMutex);
+ minutesRunning = 0;
+ currentTestName = tc->getName();
+ }
+
+ try {
+ tc->run();
+ passes = true;
+ }
+ catch ( MyAssertionException * ae ) {
+ err << ae->ss.str();
+ delete( ae );
+ }
+ catch ( std::exception& e ) {
+ err << " exception: " << e.what();
+ }
+ catch ( int x ) {
+ err << " caught int : " << x << endl;
+ }
+ catch ( ... ) {
+ cerr << "unknown exception in test: " << tc->getName() << endl;
+ }
+
+ if ( ! passes ) {
+ string s = err.str();
+ log() << "FAIL: " << s << endl;
+ r->_fails++;
+ r->_messages.push_back( s );
+ }
+ }
+
+ if ( r->_fails )
+ r->_rc = 17;
+
+ log(1) << "\t DONE running tests" << endl;
+
+ return r;
+ }
+
+ void show_help_text(const char* name, po::options_description options) {
+ cout << "usage: " << name << " [options] [suite]..." << endl
+ << options << "suite: run the specified test suite(s) only" << endl;
+ }
+
+ class TestWatchDog : public BackgroundJob {
+ public:
+ virtual string name() const { return "TestWatchDog"; }
+ virtual void run(){
+
+ while (true) {
+ sleepsecs(60);
+
+ scoped_lock lk(minutesRunningMutex);
+ minutesRunning++; //reset to 0 when new test starts
+
+ if (minutesRunning > 30){
+ log() << currentTestName << " has been running for more than 30 minutes. aborting." << endl;
+ ::abort();
+ }
+ else if (minutesRunning > 1){
+ warning() << currentTestName << " has been running for more than " << minutesRunning-1 << " minutes." << endl;
+ }
+ }
+ }
+ };
+
+ unsigned perfHist = 1;
+
+ int Suite::run( int argc , char** argv , string default_dbpath ) {
+ unsigned long long seed = time( 0 );
+ string dbpathSpec;
+
+ po::options_description shell_options("options");
+ po::options_description hidden_options("Hidden options");
+ po::options_description cmdline_options("Command line options");
+ po::positional_options_description positional_options;
+
+ shell_options.add_options()
+ ("help,h", "show this usage information")
+ ("dbpath", po::value<string>(&dbpathSpec)->default_value(default_dbpath),
+ "db data path for this test run. NOTE: the contents of this "
+ "directory will be overwritten if it already exists")
+ ("debug", "run tests with verbose output")
+ ("list,l", "list available test suites")
+ ("bigfiles", "use big datafiles instead of smallfiles which is the default")
+ ("filter,f" , po::value<string>() , "string substring filter on test name" )
+ ("verbose,v", "verbose")
+ ("dur", "enable journaling")
+ ("nodur", "disable journaling (currently the default)")
+ ("seed", po::value<unsigned long long>(&seed), "random number seed")
+ ("perfHist", po::value<unsigned>(&perfHist), "number of back runs of perf stats to display")
+ ;
+
+ hidden_options.add_options()
+ ("suites", po::value< vector<string> >(), "test suites to run")
+ ("nopreallocj", "disable journal prealloc")
+ ;
+
+ positional_options.add("suites", -1);
+
+ cmdline_options.add(shell_options).add(hidden_options);
+
+ po::variables_map params;
+ int command_line_style = (((po::command_line_style::unix_style ^
+ po::command_line_style::allow_guessing) |
+ po::command_line_style::allow_long_disguise) ^
+ po::command_line_style::allow_sticky);
+
+ try {
+ po::store(po::command_line_parser(argc, argv).options(cmdline_options).
+ positional(positional_options).
+ style(command_line_style).run(), params);
+ po::notify(params);
+ }
+ catch (po::error &e) {
+ cout << "ERROR: " << e.what() << endl << endl;
+ show_help_text(argv[0], shell_options);
+ return EXIT_BADOPTIONS;
+ }
+
+ if (params.count("help")) {
+ show_help_text(argv[0], shell_options);
+ return EXIT_CLEAN;
+ }
+
+ bool nodur = false;
+ if( params.count("nodur") ) {
+ nodur = true;
+ cmdLine.dur = false;
+ }
+ if( params.count("dur") || cmdLine.dur ) {
+ cmdLine.dur = true;
+ }
+
+ if( params.count("nopreallocj") ) {
+ cmdLine.preallocj = false;
+ }
+
+ if (params.count("debug") || params.count("verbose") ) {
+ logLevel = 1;
+ }
+
+ if (params.count("list")) {
+ for ( map<string,Suite*>::iterator i = _suites->begin() ; i != _suites->end(); i++ )
+ cout << i->first << endl;
+ return 0;
+ }
+
+ boost::filesystem::path p(dbpathSpec);
+
+ /* remove the contents of the test directory if it exists. */
+ if (boost::filesystem::exists(p)) {
+ if (!boost::filesystem::is_directory(p)) {
+ cout << "ERROR: path \"" << p.string() << "\" is not a directory" << endl << endl;
+ show_help_text(argv[0], shell_options);
+ return EXIT_BADOPTIONS;
+ }
+ boost::filesystem::directory_iterator end_iter;
+ for (boost::filesystem::directory_iterator dir_iter(p);
+ dir_iter != end_iter; ++dir_iter) {
+ boost::filesystem::remove_all(*dir_iter);
+ }
+ }
+ else {
+ boost::filesystem::create_directory(p);
+ }
+
+ string dbpathString = p.native_directory_string();
+ dbpath = dbpathString.c_str();
+
+ cmdLine.prealloc = false;
+
+ // dbtest defaults to smallfiles
+ cmdLine.smallfiles = true;
+ if( params.count("bigfiles") ) {
+ cmdLine.dur = true;
+ }
+
+ cmdLine.oplogSize = 10 * 1024 * 1024;
+ Client::initThread("testsuite");
+ acquirePathLock();
+
+ srand( (unsigned) seed );
+ printGitVersion();
+ printSysInfo();
+ DEV log() << "_DEBUG build" << endl;
+ if( sizeof(void*)==4 )
+ log() << "32bit" << endl;
+ log() << "random seed: " << seed << endl;
+
+ if( time(0) % 3 == 0 && !nodur ) {
+ cmdLine.dur = true;
+ log() << "****************" << endl;
+ log() << "running with journaling enabled to test that. dbtests will do this occasionally even if --dur is not specified." << endl;
+ log() << "****************" << endl;
+ }
+
+ FileAllocator::get()->start();
+
+ vector<string> suites;
+ if (params.count("suites")) {
+ suites = params["suites"].as< vector<string> >();
+ }
+
+ string filter = "";
+ if ( params.count( "filter" ) ) {
+ filter = params["filter"].as<string>();
+ }
+
+ dur::startup();
+
+ if( debug && cmdLine.dur ) {
+ log() << "_DEBUG: automatically enabling cmdLine.durOptions=8 (DurParanoid)" << endl;
+ // this was commented out. why too slow or something? :
+ cmdLine.durOptions |= 8;
+ }
+
+ TestWatchDog twd;
+ twd.go();
+
+ int ret = run(suites,filter);
+
+#if !defined(_WIN32) && !defined(__sunos__)
+ flock( lockFile, LOCK_UN );
+#endif
+
+ cc().shutdown();
+ dbexit( (ExitCode)ret ); // so everything shuts down cleanly
+ return ret;
+ }
+
+ int Suite::run( vector<string> suites , const string& filter ) {
+ for ( unsigned int i = 0; i < suites.size(); i++ ) {
+ if ( _suites->find( suites[i] ) == _suites->end() ) {
+ cout << "invalid test [" << suites[i] << "], use --list to see valid names" << endl;
+ return -1;
+ }
+ }
+
+ list<string> torun(suites.begin(), suites.end());
+
+ if ( torun.size() == 0 )
+ for ( map<string,Suite*>::iterator i=_suites->begin() ; i!=_suites->end(); i++ )
+ torun.push_back( i->first );
+
+ list<Result*> results;
+
+ for ( list<string>::iterator i=torun.begin(); i!=torun.end(); i++ ) {
+ string name = *i;
+ Suite * s = (*_suites)[name];
+ assert( s );
+
+ log() << "going to run suite: " << name << endl;
+ results.push_back( s->run( filter ) );
+ }
+
+ Logstream::get().flush();
+
+ cout << "**************************************************" << endl;
+
+ int rc = 0;
+
+ int tests = 0;
+ int fails = 0;
+ int asserts = 0;
+
+ for ( list<Result*>::iterator i=results.begin(); i!=results.end(); i++ ) {
+ Result * r = *i;
+ cout << r->toString();
+ if ( abs( r->rc() ) > abs( rc ) )
+ rc = r->rc();
+
+ tests += r->_tests;
+ fails += r->_fails;
+ asserts += r->_asserts;
+ }
+
+ Result totals ("TOTALS");
+ totals._tests = tests;
+ totals._fails = fails;
+ totals._asserts = asserts;
+
+ cout << totals.toString(); // includes endl
+
+ return rc;
+ }
+
+ void Suite::registerSuite( string name , Suite * s ) {
+ if ( ! _suites )
+ _suites = new map<string,Suite*>();
+ Suite*& m = (*_suites)[name];
+ uassert( 10162 , "already have suite with that name" , ! m );
+ m = s;
+ }
+
+ void assert_pass() {
+ Result::cur->_asserts++;
+ }
+
+ void assert_fail( const char * exp , const char * file , unsigned line ) {
+ Result::cur->_asserts++;
+
+ MyAssertionException * e = new MyAssertionException();
+ e->ss << "ASSERT FAILED! " << file << ":" << line << endl;
+ throw e;
+ }
+
+ void fail( const char * exp , const char * file , unsigned line ) {
+ assert(0);
+ }
+
+ MyAssertionException * MyAsserts::getBase() {
+ MyAssertionException * e = new MyAssertionException();
+ e->ss << _file << ":" << _line << " " << _aexp << " != " << _bexp << " ";
+ return e;
+ }
+
+ void MyAsserts::printLocation() {
+ log() << _file << ":" << _line << " " << _aexp << " != " << _bexp << " ";
+ }
+
+ void MyAsserts::_gotAssert() {
+ Result::cur->_asserts++;
+ }
+
+ }
+
+ void setupSignals( bool inFork ) {}
+
+}
diff --git a/src/mongo/dbtests/framework.h b/src/mongo/dbtests/framework.h
new file mode 100644
index 00000000000..adf610a05eb
--- /dev/null
+++ b/src/mongo/dbtests/framework.h
@@ -0,0 +1,199 @@
+// framework.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+
+ simple portable regression system
+ */
+
+#include "../pch.h"
+
+#define ASSERT_THROWS(a,b) \
+ try { \
+ a; \
+ mongo::regression::assert_fail( #a , __FILE__ , __LINE__ ); \
+ } catch ( b& ){ \
+ mongo::regression::assert_pass(); \
+ }
+
+
+
+#define ASSERT_EQUALS(a,b) (mongo::regression::MyAsserts( #a , #b , __FILE__ , __LINE__ ) ).ae( (a) , (b) )
+#define ASSERT_NOT_EQUALS(a,b) (mongo::regression::MyAsserts( #a , #b , __FILE__ , __LINE__ ) ).nae( (a) , (b) )
+
+#define ASSERT(x) (void)( (!(!(x))) ? mongo::regression::assert_pass() : mongo::regression::assert_fail( #x , __FILE__ , __LINE__ ) )
+#define FAIL(x) mongo::regression::fail( #x , __FILE__ , __LINE__ )
+
+#include "../db/instance.h"
+
+namespace mongo {
+
+ namespace regression {
+
+ class Result;
+
+ class TestCase {
+ public:
+ virtual ~TestCase() {}
+ virtual void run() = 0;
+ virtual string getName() = 0;
+ };
+
+ template< class T >
+ class TestHolderBase : public TestCase {
+ public:
+ TestHolderBase() {}
+ virtual ~TestHolderBase() {}
+ virtual void run() {
+ auto_ptr<T> t;
+ t.reset( create() );
+ t->run();
+ }
+ virtual T * create() = 0;
+ virtual string getName() {
+ return demangleName( typeid(T) );
+ }
+ };
+
+ template< class T >
+ class TestHolder0 : public TestHolderBase<T> {
+ public:
+ virtual T * create() {
+ return new T();
+ }
+ };
+
+ template< class T , typename A >
+ class TestHolder1 : public TestHolderBase<T> {
+ public:
+ TestHolder1( const A& a ) : _a(a) {}
+ virtual T * create() {
+ return new T( _a );
+ }
+ const A& _a;
+ };
+
+ class Suite {
+ public:
+ Suite( string name ) : _name( name ) {
+ registerSuite( name , this );
+ _ran = 0;
+ }
+
+ virtual ~Suite() {
+ if ( _ran ) {
+ DBDirectClient c;
+ c.dropDatabase( "unittests" );
+ }
+ }
+
+ template<class T>
+ void add() {
+ _tests.push_back( new TestHolder0<T>() );
+ }
+
+ template<class T , typename A >
+ void add( const A& a ) {
+ _tests.push_back( new TestHolder1<T,A>(a) );
+ }
+
+ Result * run( const string& filter );
+
+ static int run( vector<string> suites , const string& filter );
+ static int run( int argc , char ** argv , string default_dbpath );
+
+
+ protected:
+ virtual void setupTests() = 0;
+
+ private:
+ string _name;
+ list<TestCase*> _tests;
+ bool _ran;
+
+ static map<string,Suite*> * _suites;
+
+ void registerSuite( string name , Suite * s );
+ };
+
+ void assert_pass();
+ void assert_fail( const char * exp , const char * file , unsigned line );
+ void fail( const char * exp , const char * file , unsigned line );
+
+ class MyAssertionException : boost::noncopyable {
+ public:
+ MyAssertionException() {
+ ss << "assertion: ";
+ }
+ stringstream ss;
+ };
+
+
+
+ class MyAsserts {
+ public:
+ MyAsserts( const char * aexp , const char * bexp , const char * file , unsigned line )
+ : _aexp( aexp ) , _bexp( bexp ) , _file( file ) , _line( line ) {
+
+ }
+
+ template<typename A,typename B>
+ void ae( A a , B b ) {
+ _gotAssert();
+ if ( a == b )
+ return;
+
+ printLocation();
+
+ MyAssertionException * e = getBase();
+ e->ss << a << " != " << b << endl;
+ log() << e->ss.str() << endl;
+ throw e;
+ }
+
+ template<typename A,typename B>
+ void nae( A a , B b ) {
+ _gotAssert();
+ if ( a != b )
+ return;
+
+ printLocation();
+
+ MyAssertionException * e = getBase();
+ e->ss << a << " == " << b << endl;
+ log() << e->ss.str() << endl;
+ throw e;
+ }
+
+
+ void printLocation();
+
+ private:
+
+ void _gotAssert();
+
+ MyAssertionException * getBase();
+
+ string _aexp;
+ string _bexp;
+ string _file;
+ unsigned _line;
+ };
+
+ }
+}
diff --git a/src/mongo/dbtests/histogram_test.cpp b/src/mongo/dbtests/histogram_test.cpp
new file mode 100644
index 00000000000..e9cbb5bdf25
--- /dev/null
+++ b/src/mongo/dbtests/histogram_test.cpp
@@ -0,0 +1,94 @@
+// histogramtests.cpp : histogram.{h,cpp} unit tests
+
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../pch.h"
+
+#include "dbtests.h"
+#include "../util/histogram.h"
+
+namespace mongo {
+
+ using mongo::Histogram;
+
+ class BoundariesInit {
+ public:
+ void run() {
+ Histogram::Options opts;
+ opts.numBuckets = 3;
+ opts.bucketSize = 10;
+ Histogram h( opts );
+
+ ASSERT_EQUALS( h.getBucketsNum(), 3u );
+
+ ASSERT_EQUALS( h.getCount( 0 ), 0u );
+ ASSERT_EQUALS( h.getCount( 1 ), 0u );
+ ASSERT_EQUALS( h.getCount( 2 ), 0u );
+
+ ASSERT_EQUALS( h.getBoundary( 0 ), 10u );
+ ASSERT_EQUALS( h.getBoundary( 1 ), 20u );
+ ASSERT_EQUALS( h.getBoundary( 2 ), numeric_limits<uint32_t>::max() );
+ }
+ };
+
+ class BoundariesExponential {
+ public:
+ void run() {
+ Histogram::Options opts;
+ opts.numBuckets = 4;
+ opts.bucketSize = 125;
+ opts.exponential = true;
+ Histogram h( opts );
+
+ ASSERT_EQUALS( h.getBoundary( 0 ), 125u );
+ ASSERT_EQUALS( h.getBoundary( 1 ), 250u );
+ ASSERT_EQUALS( h.getBoundary( 2 ), 500u );
+ ASSERT_EQUALS( h.getBoundary( 3 ), numeric_limits<uint32_t>::max() );
+ }
+ };
+
+ class BoundariesFind {
+ public:
+ void run() {
+ Histogram::Options opts;
+ opts.numBuckets = 3;
+ opts.bucketSize = 10;
+ Histogram h( opts );
+
+ h.insert( 10 ); // end of first bucket
+ h.insert( 15 ); // second bucket
+ h.insert( 18 ); // second bucket
+
+ ASSERT_EQUALS( h.getCount( 0 ), 1u );
+ ASSERT_EQUALS( h.getCount( 1 ), 2u );
+ ASSERT_EQUALS( h.getCount( 2 ), 0u );
+ }
+ };
+
+ class HistogramSuite : public Suite {
+ public:
+ HistogramSuite() : Suite( "histogram" ) {}
+
+ void setupTests() {
+ add< BoundariesInit >();
+ add< BoundariesExponential >();
+ add< BoundariesFind >();
+ // TODO: complete the test suite
+ }
+ } histogramSuite;
+
+} // anonymous namespace
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
new file mode 100644
index 00000000000..709c013f6d8
--- /dev/null
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -0,0 +1,2208 @@
+// jsobjtests.cpp - Tests for jsobj.{h,cpp} code
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../bson/util/builder.h"
+#include "../db/jsobj.h"
+#include "../db/jsobjmanipulator.h"
+#include "../db/json.h"
+#include "../db/repl.h"
+#include "../db/extsort.h"
+#include "dbtests.h"
+#include "../util/mongoutils/checksum.h"
+#include "../db/key.h"
+#include "../db/btree.h"
+
+namespace JsobjTests {
+
+ IndexInterface& indexInterfaceForTheseTests = (time(0)%2) ? *IndexDetails::iis[0] : *IndexDetails::iis[1];
+
+ void keyTest(const BSONObj& o, bool mustBeCompact = false) {
+ static KeyV1Owned *kLast;
+ static BSONObj last;
+
+ KeyV1Owned *key = new KeyV1Owned(o);
+ KeyV1Owned& k = *key;
+
+ ASSERT( !mustBeCompact || k.isCompactFormat() );
+
+ BSONObj x = k.toBson();
+ int res = o.woCompare(x, BSONObj(), /*considerfieldname*/false);
+ if( res ) {
+ cout << o.toString() << endl;
+ k.toBson();
+ cout << x.toString() << endl;
+ o.woCompare(x, BSONObj(), /*considerfieldname*/false);
+ ASSERT( res == 0 );
+ }
+ ASSERT( k.woEqual(k) );
+ ASSERT( !k.isCompactFormat() || k.dataSize() < o.objsize() );
+
+ {
+ // check BSONObj::equal. this part not a KeyV1 test.
+ int res = o.woCompare(last);
+ ASSERT( (res==0) == o.equal(last) );
+ }
+
+ if( kLast ) {
+ int r1 = o.woCompare(last, BSONObj(), false);
+ int r2 = k.woCompare(*kLast, Ordering::make(BSONObj()));
+ bool ok = (r1<0 && r2<0) || (r1>0&&r2>0) || r1==r2;
+ if( !ok ) {
+ cout << "r1r2 " << r1 << ' ' << r2 << endl;
+ cout << "o:" << o.toString() << endl;
+ cout << "last:" << last.toString() << endl;
+ cout << "k:" << k.toString() << endl;
+ cout << "kLast:" << kLast->toString() << endl;
+ int r3 = k.woCompare(*kLast, Ordering::make(BSONObj()));
+ cout << r3 << endl;
+ }
+ ASSERT(ok);
+ if( k.isCompactFormat() && kLast->isCompactFormat() ) { // only check if not bson as bson woEqual is broken! (or was may2011)
+ if( k.woEqual(*kLast) != (r2 == 0) ) { // check woEqual matches
+ cout << r2 << endl;
+ cout << k.toString() << endl;
+ cout << kLast->toString() << endl;
+ k.woEqual(*kLast);
+ ASSERT(false);
+ }
+ }
+ }
+
+ delete kLast;
+ kLast = key;
+ last = o.getOwned();
+ }
+
+ class BufBuilderBasic {
+ public:
+ void run() {
+ {
+ BufBuilder b( 0 );
+ b.appendStr( "foo" );
+ ASSERT_EQUALS( 4, b.len() );
+ ASSERT( strcmp( "foo", b.buf() ) == 0 );
+ }
+ {
+ mongo::StackBufBuilder b;
+ b.appendStr( "foo" );
+ ASSERT_EQUALS( 4, b.len() );
+ ASSERT( strcmp( "foo", b.buf() ) == 0 );
+ }
+ }
+ };
+
+ class BSONElementBasic {
+ public:
+ void run() {
+ ASSERT_EQUALS( 1, BSONElement().size() );
+
+ BSONObj x;
+ ASSERT_EQUALS( 1, x.firstElement().size() );
+ }
+ };
+
+ namespace BSONObjTests {
+ class Create {
+ public:
+ void run() {
+ BSONObj b;
+ ASSERT_EQUALS( 0, b.nFields() );
+ }
+ };
+
+ class Base {
+ protected:
+ static BSONObj basic( const char *name, int val ) {
+ BSONObjBuilder b;
+ b.append( name, val );
+ return b.obj();
+ }
+ static BSONObj basic( const char *name, vector< int > val ) {
+ BSONObjBuilder b;
+ b.append( name, val );
+ return b.obj();
+ }
+ template< class T >
+ static BSONObj basic( const char *name, T val ) {
+ BSONObjBuilder b;
+ b.append( name, val );
+ return b.obj();
+ }
+ };
+
+ class WoCompareBasic : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ) ) == 0 );
+ ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ) ) > 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ) ) < 0 );
+ // field name comparison
+ ASSERT( basic( "a", 1 ).woCompare( basic( "b", 1 ) ) < 0 );
+ }
+ };
+
+ class NumericCompareBasic : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1.0 ) ) == 0 );
+ }
+ };
+
+ class WoCompareEmbeddedObject : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
+ ( basic( "a", basic( "b", 1.0 ) ) ) == 0 );
+ ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
+ ( basic( "a", basic( "b", 2 ) ) ) < 0 );
+ }
+ };
+
+ class WoCompareEmbeddedArray : public Base {
+ public:
+ void run() {
+ vector< int > i;
+ i.push_back( 1 );
+ i.push_back( 2 );
+ vector< double > d;
+ d.push_back( 1 );
+ d.push_back( 2 );
+ ASSERT( basic( "a", i ).woCompare( basic( "a", d ) ) == 0 );
+
+ vector< int > j;
+ j.push_back( 1 );
+ j.push_back( 3 );
+ ASSERT( basic( "a", i ).woCompare( basic( "a", j ) ) < 0 );
+ }
+ };
+
+ class WoCompareOrdered : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) == 0 );
+ ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) > 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", 1 ) ) < 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) == 0 );
+ ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) < 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", -1 ) ) > 0 );
+ }
+ };
+
+ class WoCompareDifferentLength : public Base {
+ public:
+ void run() {
+ ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << 1 << "b" << 1 ) ) < 0 );
+ ASSERT( BSON( "a" << 1 << "b" << 1 ).woCompare( BSON( "a" << 1 ) ) > 0 );
+ }
+ };
+
+ class WoSortOrder : public Base {
+ public:
+ void run() {
+ ASSERT( BSON( "a" << 1 ).woSortOrder( BSON( "a" << 2 ), BSON( "b" << 1 << "a" << 1 ) ) < 0 );
+ ASSERT( fromjson( "{a:null}" ).woSortOrder( BSON( "b" << 1 ), BSON( "a" << 1 ) ) == 0 );
+ }
+ };
+
+ class MultiKeySortOrder : public Base {
+ public:
+ void run() {
+ ASSERT( BSON( "x" << "a" ).woCompare( BSON( "x" << "b" ) ) < 0 );
+ ASSERT( BSON( "x" << "b" ).woCompare( BSON( "x" << "a" ) ) > 0 );
+
+ ASSERT( BSON( "x" << "a" << "y" << "a" ).woCompare( BSON( "x" << "a" << "y" << "b" ) ) < 0 );
+ ASSERT( BSON( "x" << "a" << "y" << "a" ).woCompare( BSON( "x" << "b" << "y" << "a" ) ) < 0 );
+ ASSERT( BSON( "x" << "a" << "y" << "a" ).woCompare( BSON( "x" << "b" ) ) < 0 );
+
+ ASSERT( BSON( "x" << "c" ).woCompare( BSON( "x" << "b" << "y" << "h" ) ) > 0 );
+ ASSERT( BSON( "x" << "b" << "y" << "b" ).woCompare( BSON( "x" << "c" ) ) < 0 );
+
+ BSONObj key = BSON( "x" << 1 << "y" << 1 );
+
+ ASSERT( BSON( "x" << "c" ).woSortOrder( BSON( "x" << "b" << "y" << "h" ) , key ) > 0 );
+ ASSERT( BSON( "x" << "b" << "y" << "b" ).woCompare( BSON( "x" << "c" ) , key ) < 0 );
+
+ key = BSON( "" << 1 << "" << 1 );
+
+ ASSERT( BSON( "" << "c" ).woSortOrder( BSON( "" << "b" << "" << "h" ) , key ) > 0 );
+ ASSERT( BSON( "" << "b" << "" << "b" ).woCompare( BSON( "" << "c" ) , key ) < 0 );
+
+ {
+ // test a big key
+ string x(2000, 'z');
+ BSONObj o = BSON( "q" << x );
+ keyTest(o, false);
+ }
+ {
+ string y(200, 'w');
+ BSONObjBuilder b;
+ for( int i = 0; i < 10; i++ ) {
+ b.append("x", y);
+ }
+ keyTest(b.obj(), true);
+ }
+ {
+ double nan = numeric_limits<double>::quiet_NaN();
+ BSONObj o = BSON( "y" << nan );
+ keyTest(o);
+ }
+
+ {
+ BSONObjBuilder b;
+ b.append( "" , "c" );
+ b.appendNull( "" );
+ BSONObj o = b.obj();
+ keyTest(o);
+ ASSERT( o.woSortOrder( BSON( "" << "b" << "" << "h" ) , key ) > 0 );
+ ASSERT( BSON( "" << "b" << "" << "h" ).woSortOrder( o , key ) < 0 );
+
+ }
+
+ ASSERT( BSON( "" << "a" ).woCompare( BSON( "" << "a" << "" << "c" ) ) < 0 );
+ {
+ BSONObjBuilder b;
+ b.append( "" , "a" );
+ b.appendNull( "" );
+ ASSERT( b.obj().woCompare( BSON( "" << "a" << "" << "c" ) ) < 0 ); // SERVER-282
+ }
+
+ }
+ };
+
+ class TimestampTest : public Base {
+ public:
+ void run() {
+ Client *c = currentClient.get();
+ if( c == 0 ) {
+ Client::initThread("pretouchN");
+ c = &cc();
+ }
+ writelock lk(""); // for initTimestamp
+
+ BSONObjBuilder b;
+ b.appendTimestamp( "a" );
+ BSONObj o = b.done();
+ o.toString();
+ ASSERT( o.valid() );
+ ASSERT_EQUALS( Timestamp, o.getField( "a" ).type() );
+ BSONObjIterator i( o );
+ ASSERT( i.moreWithEOO() );
+ ASSERT( i.more() );
+
+ BSONElement e = i.next();
+ ASSERT_EQUALS( Timestamp, e.type() );
+ ASSERT( i.moreWithEOO() );
+ ASSERT( ! i.more() );
+
+ e = i.next();
+ ASSERT( e.eoo() );
+
+ OpTime before = OpTime::now();
+ BSONElementManipulator( o.firstElement() ).initTimestamp();
+ OpTime after = OpTime::now();
+
+ OpTime test = OpTime( o.firstElement().date() );
+ ASSERT( before < test && test < after );
+
+ BSONElementManipulator( o.firstElement() ).initTimestamp();
+ test = OpTime( o.firstElement().date() );
+ ASSERT( before < test && test < after );
+
+ OpTime x(123,456);
+ ASSERT_EQUALS( 528280977864LL , x.asLL() );
+ }
+ };
+
// Ordering of special double values in woCompare(): any NaN compares
// equal to any other NaN and below every number (including -inf side of
// finite values), while infinity compares above finite numbers. KeyV1
// comparison must agree with BSONObj comparison.
class Nan : public Base {
public:
    void run() {
        double inf = numeric_limits< double >::infinity();
        double nan = numeric_limits< double >::quiet_NaN();
        double nan2 = numeric_limits< double >::signaling_NaN();
        ASSERT( isNaN(nan) );
        ASSERT( isNaN(nan2) );
        ASSERT( !isNaN(inf) );

        // infinity sorts above finite numbers
        ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << inf ) ) == 0 );
        ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << 1 ) ) > 0 );
        ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << inf ) ) < 0 );

        // NaN sorts equal to itself and below numbers
        ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << nan ) ) == 0 );
        ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << 1 ) ) < 0 );

        ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << 5000000000LL ) ) < 0 );

        {
            // KeyV1 comparison must agree
            KeyV1Owned a( BSON( "a" << nan ) );
            KeyV1Owned b( BSON( "a" << 1 ) );
            Ordering o = Ordering::make(BSON("a"<<1));
            ASSERT( a.woCompare(b, o) < 0 );
        }

        ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << nan ) ) > 0 );

        // signaling NaN behaves like quiet NaN
        ASSERT( BSON( "a" << nan2 ).woCompare( BSON( "a" << nan2 ) ) == 0 );
        ASSERT( BSON( "a" << nan2 ).woCompare( BSON( "a" << 1 ) ) < 0 );
        ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << nan2 ) ) > 0 );

        ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan ) ) > 0 );
        ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan2 ) ) > 0 );
        ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << nan2 ) ) == 0 );
    }
};
+
// BSONObjBuilder::asTempObj() exposes the builder's current contents as
// a readable object WITHOUT finishing the builder; appending afterwards
// -- including across an internal buffer realloc -- must still produce a
// correct final object.
class AsTempObj {
public:
    void run() {
        {
            BSONObjBuilder bb;
            bb << "a" << 1;
            BSONObj tmp = bb.asTempObj();
            // size = 4 (length) + (1 type + 2 name "a\0" + 4 int) + 1 EOO
            ASSERT(tmp.objsize() == 4+(1+2+4)+1);
            ASSERT(tmp.valid());
            ASSERT(tmp.hasField("a"));
            ASSERT(!tmp.hasField("b"));
            ASSERT(tmp == BSON("a" << 1));

            // builder still usable after asTempObj()
            bb << "b" << 2;
            BSONObj obj = bb.obj();
            ASSERT_EQUALS(obj.objsize() , 4+(1+2+4)+(1+2+4)+1);
            ASSERT(obj.valid());
            ASSERT(obj.hasField("a"));
            ASSERT(obj.hasField("b"));
            ASSERT(obj == BSON("a" << 1 << "b" << 2));
        }
        {
            // same thing with subobject-producing stream operators
            BSONObjBuilder bb;
            bb << "a" << GT << 1;
            BSONObj tmp = bb.asTempObj();
            ASSERT(tmp.objsize() == 4+(1+2+(4+1+4+4+1))+1);
            ASSERT(tmp.valid());
            ASSERT(tmp.hasField("a"));
            ASSERT(!tmp.hasField("b"));
            ASSERT(tmp == BSON("a" << BSON("$gt" << 1)));

            bb << "b" << LT << 2;
            BSONObj obj = bb.obj();
            ASSERT(obj.objsize() == 4+(1+2+(4+1+4+4+1))+(1+2+(4+1+4+4+1))+1);
            ASSERT(obj.valid());
            ASSERT(obj.hasField("a"));
            ASSERT(obj.hasField("b"));
            ASSERT(obj == BSON("a" << BSON("$gt" << 1)
                               << "b" << BSON("$lt" << 2)));
        }
        {
            // tiny initial buffer (32 bytes) so the append below reallocs
            BSONObjBuilder bb(32);
            bb << "a" << 1;
            BSONObj tmp = bb.asTempObj();
            ASSERT(tmp.objsize() == 4+(1+2+4)+1);
            ASSERT(tmp.valid());
            ASSERT(tmp.hasField("a"));
            ASSERT(!tmp.hasField("b"));
            ASSERT(tmp == BSON("a" << 1));

            //force a realloc
            BSONArrayBuilder arr;
            for (int i=0; i < 10000; i++) {
                arr << i;
            }
            bb << "b" << arr.arr();
            BSONObj obj = bb.obj();
            ASSERT(obj.valid());
            ASSERT(obj.hasField("a"));
            ASSERT(obj.hasField("b"));
        }
    }
};
+
// appendIntOrLL() stores the value as NumberInt when it fits in 32 bits
// and as NumberLong otherwise; the asserts at the bottom pin the chosen
// type for each magnitude. The first scope documents the still-open
// SERVER-3717 issue (long long vs double comparison precision loss).
struct AppendIntOrLL {
    void run() {
        const long long billion = 1000*1000*1000;

        long long n = 0x3333111122224444LL;
        {
            // n is not exactly representable as a double, so the
            // round-trip below loses precision
            double d = (double) n;
            BSONObj a = BSON( "x" << n );
            BSONObj b = BSON( "x" << d );

            long long back = (long long) d;
//3717
////// int res = a.woCompare(b);

            ASSERT( n > back );
            //ASSERT( res > 0 ); // SERVER-3717

            keyTest(a, false);

            KeyV1Owned A(a);
            KeyV1Owned B(b);
//3717
////// int res2 = A.woCompare(B, Ordering::make(BSONObj()));
            // ASSERT( res2 > 0 ); // SERVER-3717

            // fixing requires an index v# change.

            cout << "todo fix SERVER-3717 and uncomment test in AppendIntOrLL" << endl;

            n++;
        }

        {
            // key round-trip for values requiring NumberLong
            BSONObjBuilder b;
            b.appendIntOrLL("L4", -4*billion);
            keyTest(b.obj());
            keyTest( BSON("" << billion) );
        }

        BSONObjBuilder b;
        // these fit in 32 bits -> expect NumberInt
        b.appendIntOrLL("i1", 1);
        b.appendIntOrLL("i2", -1);
        b.appendIntOrLL("i3", 1*billion);
        b.appendIntOrLL("i4", -1*billion);

        // these do not -> expect NumberLong
        b.appendIntOrLL("L1", 2*billion);
        b.appendIntOrLL("L2", -2*billion);
        b.appendIntOrLL("L3", 4*billion);
        b.appendIntOrLL("L4", -4*billion);
        b.appendIntOrLL("L5", 16*billion);
        b.appendIntOrLL("L6", -16*billion);

        BSONObj o = b.obj();
        keyTest(o);

        ASSERT(o["i1"].type() == NumberInt);
        ASSERT(o["i1"].number() == 1);
        ASSERT(o["i2"].type() == NumberInt);
        ASSERT(o["i2"].number() == -1);
        ASSERT(o["i3"].type() == NumberInt);
        ASSERT(o["i3"].number() == 1*billion);
        ASSERT(o["i4"].type() == NumberInt);
        ASSERT(o["i4"].number() == -1*billion);

        ASSERT(o["L1"].type() == NumberLong);
        ASSERT(o["L1"].number() == 2*billion);
        ASSERT(o["L2"].type() == NumberLong);
        ASSERT(o["L2"].number() == -2*billion);
        ASSERT(o["L3"].type() == NumberLong);
        ASSERT(o["L3"].number() == 4*billion);
        ASSERT(o["L4"].type() == NumberLong);
        ASSERT(o["L4"].number() == -4*billion);
        ASSERT(o["L5"].type() == NumberLong);
        ASSERT(o["L5"].number() == 16*billion);
        ASSERT(o["L6"].type() == NumberLong);
        ASSERT(o["L6"].number() == -16*billion);
    }
};
+
// appendNumber() picks the stored BSON type from the argument's static
// type and magnitude; the expected type for each case is pinned by the
// asserts below (note 2^40-1 becomes NumberDouble while 2^60 becomes
// NumberLong).
struct AppendNumber {
    void run() {
        BSONObjBuilder b;
        b.appendNumber( "a" , 5 );                              // int
        b.appendNumber( "b" , 5.5 );                            // double
        b.appendNumber( "c" , (1024LL*1024*1024)-1 );           // 2^30-1, fits in int
        b.appendNumber( "d" , (1024LL*1024*1024*1024)-1 );      // 2^40-1
        b.appendNumber( "e" , 1024LL*1024*1024*1024*1024*1024 );// 2^60

        BSONObj o = b.obj();
        keyTest(o);

        ASSERT( o["a"].type() == NumberInt );
        ASSERT( o["b"].type() == NumberDouble );
        ASSERT( o["c"].type() == NumberInt );
        ASSERT( o["d"].type() == NumberDouble );
        ASSERT( o["e"].type() == NumberLong );

    }
};
+
+ class ToStringArray {
+ public:
+ void run() {
+ string spec = "{ a: [ \"a\", \"b\" ] }";
+ ASSERT_EQUALS( spec, fromjson( spec ).toString() );
+
+ BSONObj x = BSON( "a" << "astring" << "b" << "str" );
+ keyTest(x);
+ keyTest(x);
+ BSONObj y = BSON( "a" << "astring" << "b" << "stra" );
+ keyTest(y);
+ y = BSON( "a" << "" );
+ keyTest(y);
+
+ keyTest( BSON("abc" << true ) );
+ keyTest( BSON("abc" << false ) );
+ keyTest( BSON("abc" << false << "b" << true ) );
+
+ Date_t now = jsTime();
+ keyTest( BSON("" << now << "" << 3 << "" << jstNULL << "" << true) );
+ keyTest( BSON("" << now << "" << 3 << "" << BSONObj() << "" << true) );
+
+ {
+ {
+ // check signed dates with new key format
+ KeyV1Owned a( BSONObjBuilder().appendDate("", -50).obj() );
+ KeyV1Owned b( BSONObjBuilder().appendDate("", 50).obj() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) < 0 );
+ }
+ {
+ // backward compatibility
+ KeyBson a( BSONObjBuilder().appendDate("", -50).obj() );
+ KeyBson b( BSONObjBuilder().appendDate("", 50).obj() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) > 0 );
+ }
+ {
+ // this is an uncompactible key:
+ BSONObj uc1 = BSONObjBuilder().appendDate("", -50).appendCode("", "abc").obj();
+ BSONObj uc2 = BSONObjBuilder().appendDate("", 55).appendCode("", "abc").obj();
+ ASSERT( uc1.woCompare(uc2, Ordering::make(BSONObj())) < 0 );
+ {
+ KeyV1Owned a(uc1);
+ KeyV1Owned b(uc2);
+ ASSERT( !a.isCompactFormat() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) < 0 );
+ }
+ {
+ KeyBson a(uc1);
+ KeyBson b(uc2);
+ ASSERT( !a.isCompactFormat() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) > 0 );
+ }
+ }
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 8, (BinDataType) 1, "aaaabbbb");
+ b.appendBinData("e", 3, (BinDataType) 1, "aaa");
+ b.appendBinData("b", 1, (BinDataType) 1, "x");
+ BSONObj o = b.obj();
+ keyTest( o, true );
+ }
+
+ {
+ // check (non)equality
+ BSONObj a = BSONObjBuilder().appendBinData("", 8, (BinDataType) 1, "abcdefgh").obj();
+ BSONObj b = BSONObjBuilder().appendBinData("", 8, (BinDataType) 1, "abcdefgj").obj();
+ ASSERT( !a.equal(b) );
+ int res_ab = a.woCompare(b);
+ ASSERT( res_ab != 0 );
+ keyTest( a, true );
+ keyTest( b, true );
+
+ // check subtypes do not equal
+ BSONObj c = BSONObjBuilder().appendBinData("", 8, (BinDataType) 4, "abcdefgh").obj();
+ BSONObj d = BSONObjBuilder().appendBinData("", 8, (BinDataType) 0x81, "abcdefgh").obj();
+ ASSERT( !a.equal(c) );
+ int res_ac = a.woCompare(c);
+ ASSERT( res_ac != 0 );
+ keyTest( c, true );
+ ASSERT( !a.equal(d) );
+ int res_ad = a.woCompare(d);
+ ASSERT( res_ad != 0 );
+ keyTest( d, true );
+
+ KeyV1Owned A(a);
+ KeyV1Owned B(b);
+ KeyV1Owned C(c);
+ KeyV1Owned D(d);
+ ASSERT( !A.woEqual(B) );
+ ASSERT( A.woCompare(B, Ordering::make(BSONObj())) < 0 && res_ab < 0 );
+ ASSERT( !A.woEqual(C) );
+ ASSERT( A.woCompare(C, Ordering::make(BSONObj())) < 0 && res_ac < 0 );
+ ASSERT( !A.woEqual(D) );
+ ASSERT( A.woCompare(D, Ordering::make(BSONObj())) < 0 && res_ad < 0 );
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 33, (BinDataType) 1, "123456789012345678901234567890123");
+ BSONObj o = b.obj();
+ keyTest( o, false );
+ }
+
+ {
+ for( int i = 1; i <= 3; i++ ) {
+ for( int j = 1; j <= 3; j++ ) {
+ BSONObjBuilder b;
+ b.appendBinData("f", i, (BinDataType) j, "abc");
+ BSONObj o = b.obj();
+ keyTest( o, j != ByteArrayDeprecated );
+ }
+ }
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 1, (BinDataType) 133, "a");
+ BSONObj o = b.obj();
+ keyTest( o, true );
+ }
+
+ {
+ BSONObjBuilder b;
+ b.append("AA", 3);
+ b.appendBinData("f", 0, (BinDataType) 0, "");
+ b.appendBinData("e", 3, (BinDataType) 7, "aaa");
+ b.appendBinData("b", 1, (BinDataType) 128, "x");
+ b.append("z", 3);
+ b.appendBinData("bb", 0, (BinDataType) 129, "x");
+ BSONObj o = b.obj();
+ keyTest( o, true );
+ }
+
+ {
+ // 9 is not supported in compact format. so test a non-compact case here.
+ BSONObjBuilder b;
+ b.appendBinData("f", 9, (BinDataType) 0, "aaaabbbbc");
+ BSONObj o = b.obj();
+ keyTest( o );
+ }
+ }
+ };
+
// Checks the text rendering of numeric element types via
// BSONElement::toString( false , true ) (no field name, full output):
// ints print bare, doubles keep a decimal point, and long doubles are
// rounded to 16 significant digits.
class ToStringNumber {
public:

    void run() {
        BSONObjBuilder b;
        b.append( "a" , (int)4 );
        b.append( "b" , (double)5 );
        b.append( "c" , (long long)6 );

        b.append( "d" , 123.456789123456789123456789123456789 );
        b.append( "e" , 123456789.123456789123456789123456789 );
        b.append( "f" , 1234567891234567891234.56789123456789 );

        b.append( "g" , -123.456 );

        BSONObj x = b.obj();
        keyTest(x);

        ASSERT_EQUALS( "4", x["a"].toString( false , true ) );
        ASSERT_EQUALS( "5.0", x["b"].toString( false , true ) ); // double keeps ".0"
        ASSERT_EQUALS( "6", x["c"].toString( false , true ) );

        // doubles are rendered with 16 significant digits
        ASSERT_EQUALS( "123.4567891234568" , x["d"].toString( false , true ) );
        ASSERT_EQUALS( "123456789.1234568" , x["e"].toString( false , true ) );
        // ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) ); // windows and *nix are different - TODO, work around for test or not bother?

        ASSERT_EQUALS( "-123.456" , x["g"].toString( false , true ) );

    }
};
+
// Embedded NUL bytes inside BSON string values are legal: comparison,
// iteration, appendAs(), and length reporting must all treat the string
// by its declared length, not by the first NUL.
class NullString {
public:
    void run() {
        {
            BSONObjBuilder b;
            const char x[] = {'a', 0, 'b', 0};
            b.append("field", x, 4);
            b.append("z", true);
            BSONObj B = b.obj();
            //cout << B.toString() << endl;

            BSONObjBuilder a;
            const char xx[] = {'a', 0, 'c', 0};
            a.append("field", xx, 4);
            a.append("z", true);
            BSONObj A = a.obj();

            BSONObjBuilder c;
            const char xxx[] = {'a', 0, 'c', 0, 0};
            c.append("field", xxx, 5);
            c.append("z", true);
            BSONObj C = c.obj();

            // test that nulls are ok within bson strings
            ASSERT( !(A == B) );
            ASSERT( A > B ); // differs after the embedded NUL ('c' > 'b')

            ASSERT( !(B == C) );
            ASSERT( C > B ); // longer string with same prefix sorts after

            // check iteration is ok
            ASSERT( B["z"].Bool() && A["z"].Bool() && C["z"].Bool() );
        }

        BSONObjBuilder b;
        b.append("a", "a\0b", 4);
        string z("a\0b", 3);
        b.append("b", z);
        b.appendAs(b.asTempObj()["a"], "c"); // copy of "a" under a new name
        BSONObj o = b.obj();
        keyTest(o);

        stringstream ss;
        ss << 'a' << '\0' << 'b';

        // valuestrsize() includes the trailing NUL terminator (3 chars + 1)
        ASSERT_EQUALS(o["a"].valuestrsize(), 3+1);
        ASSERT_EQUALS(o["a"].str(), ss.str());

        ASSERT_EQUALS(o["b"].valuestrsize(), 3+1);
        ASSERT_EQUALS(o["b"].str(), ss.str());

        ASSERT_EQUALS(o["c"].valuestrsize(), 3+1);
        ASSERT_EQUALS(o["c"].str(), ss.str());

    }

};
+
+ class AppendAs {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ {
+ BSONObj foo = BSON( "foo" << 1 );
+ b.appendAs( foo.firstElement(), "bar" );
+ }
+ ASSERT_EQUALS( BSON( "bar" << 1 ), b.done() );
+ }
+ };
+
+ class ArrayAppendAs {
+ public:
+ void run() {
+ BSONArrayBuilder b;
+ {
+ BSONObj foo = BSON( "foo" << 1 );
+ b.appendAs( foo.firstElement(), "3" );
+ }
+ BSONArray a = b.arr();
+ BSONObj expected = BSON( "3" << 1 );
+ ASSERT_EQUALS( expected.firstElement(), a[ 3 ] );
+ ASSERT_EQUALS( 4, a.nFields() );
+ }
+ };
+
+ class GetField {
+ public:
+ void run(){
+ BSONObj o = BSON( "a" << 1 <<
+ "b" << BSON( "a" << 2 ) <<
+ "c" << BSON_ARRAY( BSON( "a" << 3 ) << BSON( "a" << 4 ) ) );
+ ASSERT_EQUALS( 1 , o.getFieldDotted( "a" ).numberInt() );
+ ASSERT_EQUALS( 2 , o.getFieldDotted( "b.a" ).numberInt() );
+ ASSERT_EQUALS( 3 , o.getFieldDotted( "c.0.a" ).numberInt() );
+ ASSERT_EQUALS( 4 , o.getFieldDotted( "c.1.a" ).numberInt() );
+ keyTest(o);
+ }
+ };
+
+ namespace Validation {
+
// Harness for corruption tests: run() checks that valid() passes
// BSONObj::valid() and that invalid() -- a deliberately corrupted
// variant built by each subclass -- fails it.
class Base {
public:
    virtual ~Base() {}
    void run() {
        ASSERT( valid().valid() );
        ASSERT( !invalid().valid() );
    }
protected:
    virtual BSONObj valid() const { return BSONObj(); }
    virtual BSONObj invalid() const { return BSONObj(); }
    // read byte i of the raw object data
    static char get( const BSONObj &o, int i ) {
        return o.objdata()[ i ];
    }
    // overwrite byte i of the raw object data in place
    static void set( BSONObj &o, int i, char c ) {
        const_cast< char * >( o.objdata() )[ i ] = c;
    }
};
+
// Corrupts the first element's type byte (offset 4) with 50, which is
// not a valid BSONType.
class BadType : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":1}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        set( ret, 4, 50 );
        return ret;
    }
};
+
// Inflates the declared total size so the EOO terminator appears before
// the stated end of the object.
class EooBeforeEnd : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":1}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        // (first byte of size)++
        set( ret, 0, get( ret, 0 ) + 1 );
        // re-read size for BSONObj::details
        return ret.copy();
    }
};
+
// An Undefined-typed element (written here by patching a Null element's
// type byte) is deprecated but must still be accepted by valid().
class Undefined : public Base {
public:
    void run() {
        BSONObjBuilder b;
        b.appendNull( "a" );
        BSONObj o = b.done();
        set( o, 4, mongo::Undefined ); // Null and Undefined are both zero-length values
        ASSERT( o.valid() );
    }
};
+
// Shrinks the declared total size so the object ends before its EOO.
class TotalSizeTooSmall : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":1}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        // (first byte of size)--
        set( ret, 0, get( ret, 0 ) - 1 );
        // re-read size for BSONObj::details
        return ret.copy();
    }
};
+
// Overwrites the terminating EOO byte and shrinks the declared size so
// the object has no terminator at all.
class EooMissing : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":1}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        set( ret, ret.objsize() - 1, (char) 0xff );
        // (first byte of size)--
        set( ret, 0, get( ret, 0 ) - 1 );
        // re-read size for BSONObj::details
        return ret.copy();
    }
};
+
// Replaces the string value's NUL terminator with a non-zero byte, so
// the string is not terminated where its declared length says it is.
class WrongStringSize : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":\"b\"}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        ASSERT_EQUALS( ret.firstElement().valuestr()[0] , 'b' );
        ASSERT_EQUALS( ret.firstElement().valuestr()[1] , 0 );
        ((char*)ret.firstElement().valuestr())[1] = 1;
        return ret.copy();
    }
};
+
// Zeroes the string length field (offset 7); a BSON string length must
// be at least 1 to cover its NUL terminator.
class ZeroStringSize : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":\"b\"}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        set( ret, 7, 0 );
        return ret;
    }
};
+
// Writes -100 into the high byte of the 4-byte string length, making
// the length negative.
class NegativeStringSize : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":\"b\"}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        set( ret, 10, -100 );
        return ret;
    }
};
+
// Inflates both the outer size and the embedded subobject's size so the
// subobject claims to extend past its parent.
class WrongSubobjectSize : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":{\"b\":1}}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        set( ret, 0, get( ret, 0 ) + 1 ); // outer size
        set( ret, 7, get( ret, 7 ) + 1 ); // subobject size
        return ret.copy();                // re-read cached size
    }
};
+
+ class WrongDbrefNsSize : public Base {
+ BSONObj valid() const {
+ return fromjson( "{ \"a\": Dbref( \"b\", \"ffffffffffffffffffffffff\" ) }" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set( ret, 0, get( ret, 0 ) + 1 );
+ set( ret, 7, get( ret, 7 ) + 1 );
+ return ret.copy();
+ };
+ };
+
// Fills the field name (and everything after it) with 0xff so the name
// is never NUL-terminated within the object.
class NoFieldNameEnd : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":1}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        memset( const_cast< char * >( ret.objdata() ) + 5, 0xff, ret.objsize() - 5 );
        return ret;
    }
};
+
// Fills the regex pattern bytes with 0xff so the pattern string is
// never terminated.
class BadRegex : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":/c/i}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        memset( const_cast< char * >( ret.objdata() ) + 7, 0xff, ret.objsize() - 7 );
        return ret;
    }
};
+
// Like BadRegex, but corrupts from offset 9 onward so only the regex
// options string loses its terminator.
class BadRegexOptions : public Base {
    BSONObj valid() const {
        return fromjson( "{\"a\":/c/i}" );
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        memset( const_cast< char * >( ret.objdata() ) + 9, 0xff, ret.objsize() - 9 );
        return ret;
    }
};
+
// Base for CodeWScope corruption tests: builds a valid
// { c: code "d" with scope { a: "b" } } element and lets subclasses
// corrupt one specific byte via modify().
class CodeWScopeBase : public Base {
    BSONObj valid() const {
        BSONObjBuilder b;
        BSONObjBuilder scope;
        scope.append( "a", "b" );
        b.appendCodeWScope( "c", "d", scope.done() );
        return b.obj();
    }
    BSONObj invalid() const {
        BSONObj ret = valid();
        modify( ret );
        return ret;
    }
protected:
    // subclass hook: corrupt one byte of the CodeWScope element
    virtual void modify( BSONObj &o ) const = 0;
};
+
// CodeWScope total size (offset 7) set too small to hold its parts.
class CodeWScopeSmallSize : public CodeWScopeBase {
    void modify( BSONObj &o ) const {
        set( o, 7, 7 );
    }
};
+
// Code-string length (offset 11) zeroed; must be >= 1 for the NUL.
class CodeWScopeZeroStrSize : public CodeWScopeBase {
    void modify( BSONObj &o ) const {
        set( o, 11, 0 );
    }
};
+
// Code-string length (offset 11) set to 1, too small for "d" + NUL.
class CodeWScopeSmallStrSize : public CodeWScopeBase {
    void modify( BSONObj &o ) const {
        set( o, 11, 1 );
    }
};
+
// Total size (offset 7) shrunk to 13: no room left for the scope
// object's own size field.
class CodeWScopeNoSizeForObj : public CodeWScopeBase {
    void modify( BSONObj &o ) const {
        set( o, 7, 13 );
    }
};
+
// Scope object's size (offset 17) set too small to hold its contents.
class CodeWScopeSmallObjSize : public CodeWScopeBase {
    void modify( BSONObj &o ) const {
        set( o, 17, 1 );
    }
};
+
// Scope object's first element type (offset 21) set past JSTypeMax,
// i.e. an invalid BSONType.
class CodeWScopeBadObject : public CodeWScopeBase {
    void modify( BSONObj &o ) const {
        set( o, 21, JSTypeMax + 1 );
    }
};
+
// Hand-built 7-byte object: size header, one element of the given type
// and name "a", but no value bytes at all; valid() must reject it for
// every type that requires a value.
class NoSize {
public:
    NoSize( BSONType type ) : type_( type ) {}
    void run() {
        const char data[] = { 0x07, 0x00, 0x00, 0x00, char( type_ ), 'a', 0x00 };
        BSONObj o( data );
        ASSERT( !o.valid() );
    }
private:
    BSONType type_; // element type under test
};
+
// Randomized BSON parsing test. See if we seg fault.
// Flips each bit of the raw data (past the 4-byte size header) with the
// given probability, then runs valid() purely to check it doesn't crash
// -- the return value is intentionally ignored. Note rand() is not
// reseeded here, so the flip pattern is deterministic per process.
class Fuzz {
public:
    Fuzz( double frequency ) : frequency_( frequency ) {}
    void run() {
        BSONObj a = fromjson( "{\"a\": 1, \"b\": \"c\"}" );
        fuzz( a );
        a.valid();

        // a larger object covering most element types
        BSONObj b = fromjson( "{\"one\":2, \"two\":5, \"three\": {},"
                              "\"four\": { \"five\": { \"six\" : 11 } },"
                              "\"seven\": [ \"a\", \"bb\", \"ccc\", 5 ],"
                              "\"eight\": Dbref( \"rrr\", \"01234567890123456789aaaa\" ),"
                              "\"_id\": ObjectId( \"deadbeefdeadbeefdeadbeef\" ),"
                              "\"nine\": { \"$binary\": \"abc=\", \"$type\": \"00\" },"
                              "\"ten\": Date( 44 ), \"eleven\": /foooooo/i }" );
        fuzz( b );
        b.valid();
    }
private:
    // flip each bit of o (after the size header) with probability frequency_
    void fuzz( BSONObj &o ) const {
        for( int i = 4; i < o.objsize(); ++i )
            for( unsigned char j = 1; j; j <<= 1 )
                if ( rand() < int( RAND_MAX * frequency_ ) ) {
                    char *c = const_cast< char * >( o.objdata() ) + i;
                    if ( *c & j )
                        *c &= ~j;
                    else
                        *c |= j;
                }
    }
    double frequency_; // per-bit flip probability in [0,1]
};
+
+ } // namespace Validation
+
+ } // namespace BSONObjTests
+
+ namespace OIDTests {
+
+ class init1 {
+ public:
+ void run() {
+ OID a;
+ OID b;
+
+ a.init();
+ b.init();
+
+ ASSERT( a != b );
+ }
+ };
+
+ class initParse1 {
+ public:
+ void run() {
+
+ OID a;
+ OID b;
+
+ a.init();
+ b.init( a.str() );
+
+ ASSERT( a == b );
+ }
+ };
+
// appendOID()'s third argument controls generation: when false (or
// defaulted) a zero OID is written; when true a fresh one is generated.
class append {
public:
    void run() {
        BSONObjBuilder b;
        b.appendOID( "a" , 0 );         // default: zero OID
        b.appendOID( "b" , 0 , false ); // explicit no-generate
        b.appendOID( "c" , 0 , true );  // generate a fresh OID
        BSONObj o = b.obj();
        keyTest(o);

        ASSERT( o["a"].__oid().str() == "000000000000000000000000" );
        ASSERT( o["b"].__oid().str() == "000000000000000000000000" );
        ASSERT( o["c"].__oid().str() != "000000000000000000000000" );

    }
};
+
// Generated OIDs must be monotonically increasing, both within the same
// instant and across a one-second gap (the leading OID bytes are a
// timestamp).
class increasing {
public:
    // build { _id: <freshly generated OID> }
    BSONObj g() {
        BSONObjBuilder b;
        b.appendOID( "_id" , 0 , true );
        return b.obj();
    }
    void run() {
        BSONObj a = g();
        BSONObj b = g();

        ASSERT( a.woCompare( b ) < 0 );

        // yes, there is a 1/1000 chance this won't increase time(0)
        // and therefore inaccurately say the function is behaving
        // but if it's broken, it will fail 999/1000, so i think that's good enough
        sleepsecs( 1 );
        BSONObj c = g();
        ASSERT( a.woCompare( c ) < 0 );
    }
};
+
// An OID generated "now" must report a creation time (asTimeT/asDateT)
// within the window bracketing its init() call.
class ToDate {
public:
    void run() {
        OID oid;

        {
            // second-resolution check
            time_t before = ::time(0);
            oid.init();
            time_t after = ::time(0);
            ASSERT( oid.asTimeT() >= before );
            ASSERT( oid.asTimeT() <= after );
        }

        {
            // Date_t check; sleep so the timestamp definitely advances
            Date_t before = jsTime();
            sleepsecs(1);
            oid.init();
            Date_t after = jsTime();
            ASSERT( oid.asDateT() >= before );
            ASSERT( oid.asDateT() <= after );
        }
    }
};
+
// init(date) seeds an OID from a timestamp; with max=true the non-time
// bytes are filled so the OID sorts after every OID of that second,
// with max=false before. All three share the same asTimeT().
class FromDate {
public:
    void run() {
        OID min, oid, max;
        Date_t now = jsTime();
        oid.init(); // slight chance this has different time. If its a problem, can change.
        min.init(now);
        max.init(now, true);

        ASSERT_EQUALS( (unsigned)oid.asTimeT() , now/1000 );
        ASSERT_EQUALS( (unsigned)min.asTimeT() , now/1000 );
        ASSERT_EQUALS( (unsigned)max.asTimeT() , now/1000 );
        ASSERT( BSON("" << min).woCompare( BSON("" << oid) ) < 0 );
        ASSERT( BSON("" << max).woCompare( BSON("" << oid) )> 0 );
    }
};
+ } // namespace OIDTests
+
+
+ namespace ValueStreamTests {
+
// Harness for stream-label ($gt/$lt/...) tests: the object built with
// labels (actual) must compare equal to the explicitly nested object
// (expected).
class LabelBase {
public:
    virtual ~LabelBase() {}
    void run() {
        ASSERT( !expected().woCompare( actual() ) );
    }
protected:
    virtual BSONObj expected() = 0;  // explicitly nested form
    virtual BSONObj actual() = 0;    // label-stream form
};
+
+ class LabelBasic : public LabelBase {
+ BSONObj expected() {
+ return BSON( "a" << ( BSON( "$gt" << 1 ) ) );
+ }
+ BSONObj actual() {
+ return BSON( "a" << GT << 1 );
+ }
+ };
+
// A labeled field sandwiched between plain fields.
class LabelShares : public LabelBase {
    BSONObj expected() {
        return BSON( "z" << "q" << "a" << ( BSON( "$gt" << 1 ) ) << "x" << "p" );
    }
    BSONObj actual() {
        return BSON( "z" << "q" << "a" << GT << 1 << "x" << "p" );
    }
};
+
// Two consecutive labels merge into one subobject: { $gt: 1, $lte: "x" }.
class LabelDouble : public LabelBase {
    BSONObj expected() {
        return BSON( "a" << ( BSON( "$gt" << 1 << "$lte" << "x" ) ) );
    }
    BSONObj actual() {
        return BSON( "a" << GT << 1 << LTE << "x" );
    }
};
+
// Merged double label between plain fields.
class LabelDoubleShares : public LabelBase {
    BSONObj expected() {
        return BSON( "z" << "q" << "a" << ( BSON( "$gt" << 1 << "$lte" << "x" ) ) << "x" << "p" );
    }
    BSONObj actual() {
        return BSON( "z" << "q" << "a" << GT << 1 << LTE << "x" << "x" << "p" );
    }
};
+
// The SIZE label builds { $size: ... } (qualified to avoid clashing
// with other SIZE macros).
class LabelSize : public LabelBase {
    BSONObj expected() {
        return BSON( "a" << BSON( "$size" << 4 ) );
    }
    BSONObj actual() {
        return BSON( "a" << mongo::SIZE << 4 );
    }
};
+
// Multiple labeled fields in one stream, including a repeated $ne label
// within a single field's subobject.
class LabelMulti : public LabelBase {
    BSONObj expected() {
        return BSON( "z" << "q"
                     << "a" << BSON( "$gt" << 1 << "$lte" << "x" )
                     << "b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 )
                     << "x" << "p" );
    }
    BSONObj actual() {
        return BSON( "z" << "q"
                     << "a" << GT << 1 << LTE << "x"
                     << "b" << NE << 1 << NE << "f" << NE << 22.3
                     << "x" << "p" );
    }
};
// The OR(...) helper wraps its label-built arguments into a
// { $or: [ ... ] } array.
class LabelishOr : public LabelBase {
    BSONObj expected() {
        return BSON( "$or" << BSON_ARRAY(
                         BSON("a" << BSON( "$gt" << 1 << "$lte" << "x" ))
                         << BSON("b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 ))
                         << BSON("x" << "p" )));
    }
    BSONObj actual() {
        return OR( BSON( "a" << GT << 1 << LTE << "x"),
                   BSON( "b" << NE << 1 << NE << "f" << NE << 22.3),
                   BSON( "x" << "p" ) );
    }
};
+
// A label with no preceding field name is malformed and must throw.
class Unallowed {
public:
    void run() {
        ASSERT_THROWS( BSON( GT << 4 ), MsgAssertionException );
        ASSERT_THROWS( BSON( "a" << 1 << GT << 4 ), MsgAssertionException );
    }
};
+
+ class ElementAppend {
+ public:
+ void run() {
+ BSONObj a = BSON( "a" << 17 );
+ BSONObj b = BSON( "b" << a["a"] );
+ ASSERT_EQUALS( NumberInt , a["a"].type() );
+ ASSERT_EQUALS( NumberInt , b["b"].type() );
+ ASSERT_EQUALS( 17 , b["b"].number() );
+ }
+ };
+
+ } // namespace ValueStreamTests
+
// subobjStart() returns a buffer a child builder can write into; after
// the child's done(), the parent builder continues appending normally.
class SubObjectBuilder {
public:
    void run() {
        BSONObjBuilder b1;
        b1.append( "a", "bcd" );
        BSONObjBuilder b2( b1.subobjStart( "foo" ) ); // child writes into b1's buffer
        b2.append( "ggg", 44.0 );
        b2.done();
        b1.append( "f", 10.0 ); // parent usable again after child done()
        BSONObj ret = b1.done();
        ASSERT( ret.valid() );
        ASSERT( ret.woCompare( fromjson( "{a:'bcd',foo:{ggg:44},f:10}" ) ) == 0 );
    }
};
+
// Streaming a Date_t produces a Date element with the same value.
class DateBuilder {
public:
    void run() {
        BSONObj o = BSON("" << Date_t(1234567890));
        ASSERT( o.firstElement().type() == Date );
        ASSERT( o.firstElement().date() == Date_t(1234567890) );
    }
};
+
// The DATENOW token appends the current time as a Date element; the
// stored value must fall inside the window bracketing the BSON() call.
class DateNowBuilder {
public:
    void run() {
        Date_t before = jsTime();
        BSONObj o = BSON("now" << DATENOW);
        Date_t after = jsTime();

        ASSERT( o.valid() );

        BSONElement e = o["now"];
        ASSERT( e.type() == Date );
        ASSERT( e.date() >= before );
        ASSERT( e.date() <= after );
    }
};
+
// appendTimeT() stores a time_t (seconds) as a Date element; comparison
// is done at second granularity (/1000) since time_t has no millis.
class TimeTBuilder {
public:
    void run() {
        Date_t before = jsTime();
        sleepmillis(1);
        time_t now = time(NULL);
        sleepmillis(1);
        Date_t after = jsTime();

        BSONObjBuilder b;
        b.appendTimeT("now", now);
        BSONObj o = b.obj();

        ASSERT( o.valid() );

        BSONElement e = o["now"];
        ASSERT( e.type() == Date );
        ASSERT( e.date()/1000 >= before/1000 );
        ASSERT( e.date()/1000 <= after/1000 );
    }
};
+
+ class MinMaxKeyBuilder {
+ public:
+ void run() {
+ BSONObj min = BSON( "a" << MINKEY );
+ BSONObj max = BSON( "b" << MAXKEY );
+
+ ASSERT( min.valid() );
+ ASSERT( max.valid() );
+
+ BSONElement minElement = min["a"];
+ BSONElement maxElement = max["b"];
+ ASSERT( minElement.type() == MinKey );
+ ASSERT( maxElement.type() == MaxKey );
+ }
+ };
+
+ class MinMaxElementTest {
+ public:
+
+ BSONObj min( int t ) {
+ BSONObjBuilder b;
+ b.appendMinForType( "a" , t );
+ return b.obj();
+ }
+
+ BSONObj max( int t ) {
+ BSONObjBuilder b;
+ b.appendMaxForType( "a" , t );
+ return b.obj();
+ }
+
+ void run() {
+ for ( int t=1; t<JSTypeMax; t++ ) {
+ stringstream ss;
+ ss << "type: " << t;
+ string s = ss.str();
+ ASSERT( min( t ).woCompare( max( t ) ) <= 0 );
+ ASSERT( max( t ).woCompare( min( t ) ) >= 0 );
+ ASSERT( min( t ).woCompare( min( t ) ) == 0 );
+ ASSERT( max( t ).woCompare( max( t ) ) == 0 );
+ }
+ }
+ };
+
// extractFields() projects the named fields out of an object; fields in
// the pattern but absent from the source are simply skipped.
class ExtractFieldsTest {
public:
    void run() {
        BSONObj x = BSON( "a" << 10 << "b" << 11 );
        assert( BSON( "a" << 10 ).woCompare( x.extractFields( BSON( "a" << 1 ) ) ) == 0 );
        assert( BSON( "b" << 11 ).woCompare( x.extractFields( BSON( "b" << 1 ) ) ) == 0 );
        assert( x.woCompare( x.extractFields( BSON( "a" << 1 << "b" << 1 ) ) ) == 0 );

        // "c" is not in x, so only "a" survives the projection
        assert( (string)"a" == x.extractFields( BSON( "a" << 1 << "c" << 1 ) ).firstElementFieldName() );
    }
};
+
+ class ComparatorTest {
+ public:
+ BSONObj one( string s ) {
+ return BSON( "x" << s );
+ }
+ BSONObj two( string x , string y ) {
+ BSONObjBuilder b;
+ b.append( "x" , x );
+ if ( y.size() )
+ b.append( "y" , y );
+ else
+ b.appendNull( "y" );
+ return b.obj();
+ }
+
+ void test( BSONObj order , BSONObj l , BSONObj r , bool wanted ) {
+ BSONObjCmp c( order );
+ bool got = c(l,r);
+ if ( got == wanted )
+ return;
+ cout << " order: " << order << " l: " << l << "r: " << r << " wanted: " << wanted << " got: " << got << endl;
+ }
+
+ void lt( BSONObj order , BSONObj l , BSONObj r ) {
+ test( order , l , r , 1 );
+ }
+
+ void run() {
+ BSONObj s = BSON( "x" << 1 );
+ BSONObj c = BSON( "x" << 1 << "y" << 1 );
+ test( s , one( "A" ) , one( "B" ) , 1 );
+ test( s , one( "B" ) , one( "A" ) , 0 );
+
+ test( c , two( "A" , "A" ) , two( "A" , "B" ) , 1 );
+ test( c , two( "A" , "A" ) , two( "B" , "A" ) , 1 );
+ test( c , two( "B" , "A" ) , two( "A" , "B" ) , 0 );
+
+ lt( c , one("A") , two( "A" , "A" ) );
+ lt( c , one("A") , one( "B" ) );
+ lt( c , two("A","") , two( "B" , "A" ) );
+
+ lt( c , two("B","A") , two( "C" , "A" ) );
+ lt( c , two("B","A") , one( "C" ) );
+ lt( c , two("B","A") , two( "C" , "" ) );
+
+ }
+ };
+
+ namespace external_sort {
// Small in-memory external sort: items come back ordered by the "x"
// key, and no spill files are created for a data set this small.
class Basic1 {
public:
    void run() {
        BSONObjExternalSorter sorter(indexInterfaceForTheseTests);

        // add( key, diskloc-a, diskloc-ofs )
        sorter.add( BSON( "x" << 10 ) , 5 , 1);
        sorter.add( BSON( "x" << 2 ) , 3 , 1 );
        sorter.add( BSON( "x" << 5 ) , 6 , 1 );
        sorter.add( BSON( "x" << 5 ) , 7 , 1 );

        sorter.sort();

        // expect ascending x: 2, 5, 5, 10
        auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
        int num=0;
        while ( i->more() ) {
            pair<BSONObj,DiskLoc> p = i->next();
            if ( num == 0 )
                assert( p.first["x"].number() == 2 );
            else if ( num <= 2 ) {
                assert( p.first["x"].number() == 5 );
            }
            else if ( num == 3 )
                assert( p.first["x"].number() == 10 );
            else
                ASSERT( 0 );
            num++;
        }


        ASSERT( 0 , sorter.numFiles() ); // NOTE(review): original uses ASSERT_EQUALS
        ASSERT_EQUALS( 0 , sorter.numFiles() ); // everything stayed in memory
    }
};
+
// Like Basic1 but with a tiny memory limit (third ctor arg = 10) and
// checks that each item's DiskLoc rides along with its key (note "5:b"
// -- the offset prints in hex, 11 == 0xb).
class Basic2 {
public:
    void run() {
        BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , 10 );
        sorter.add( BSON( "x" << 10 ) , 5 , 11 );
        sorter.add( BSON( "x" << 2 ) , 3 , 1 );
        sorter.add( BSON( "x" << 5 ) , 6 , 1 );
        sorter.add( BSON( "x" << 5 ) , 7 , 1 );

        sorter.sort();

        auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
        int num=0;
        while ( i->more() ) {
            pair<BSONObj,DiskLoc> p = i->next();
            if ( num == 0 ) {
                assert( p.first["x"].number() == 2 );
                ASSERT_EQUALS( p.second.toString() , "3:1" );
            }
            else if ( num <= 2 )
                assert( p.first["x"].number() == 5 );
            else if ( num == 3 ) {
                assert( p.first["x"].number() == 10 );
                ASSERT_EQUALS( p.second.toString() , "5:b" ); // ofs 11 in hex
            }
            else
                ASSERT( 0 );
            num++;
        }

    }
};
+
// Sorting an empty data set yields an iterator with nothing to return.
class Basic3 {
public:
    void run() {
        BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , 10 );
        sorter.sort();

        auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
        assert( ! i->more() );

    }
};
+
+
// Equal keys are tie-broken by DiskLoc: inputs are added with offsets
// chosen so that after sorting, each item's offset equals its output
// position. (Class name says "Lock" but it is DiskLoc ordering.)
class ByDiskLock {
public:
    void run() {
        BSONObjExternalSorter sorter(indexInterfaceForTheseTests);
        sorter.add( BSON( "x" << 10 ) , 5 , 4);
        sorter.add( BSON( "x" << 2 ) , 3 , 0 );
        sorter.add( BSON( "x" << 5 ) , 6 , 2 );
        sorter.add( BSON( "x" << 5 ) , 7 , 3 );
        sorter.add( BSON( "x" << 5 ) , 2 , 1 );

        sorter.sort();

        auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
        int num=0;
        while ( i->more() ) {
            pair<BSONObj,DiskLoc> p = i->next();
            if ( num == 0 )
                assert( p.first["x"].number() == 2 );
            else if ( num <= 3 ) {
                assert( p.first["x"].number() == 5 );
            }
            else if ( num == 4 )
                assert( p.first["x"].number() == 10 );
            else
                ASSERT( 0 );
            ASSERT_EQUALS( num , p.second.getOfs() ); // DiskLoc tie-break order
            num++;
        }


    }
};
+
+
// 10000 random keys under a 2000-byte memory limit: output must be
// nondecreasing and complete.
class Big1 {
public:
    void run() {
        BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , 2000 );
        for ( int i=0; i<10000; i++ ) {
            sorter.add( BSON( "x" << rand() % 10000 ) , 5 , i );
        }

        sorter.sort();

        auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
        int num=0;
        double prev = 0;
        while ( i->more() ) {
            pair<BSONObj,DiskLoc> p = i->next();
            num++;
            double cur = p.first["x"].number();
            assert( cur >= prev ); // nondecreasing order
            prev = cur;
        }
        assert( num == 10000 ); // nothing lost
    }
};
+
// 100000 identical { a: "b" } docs; checks the full count comes back
// and that multiple spill files were used.
// NOTE(review): the objects have no "x" field, so p.first["x"].number()
// is always 0 and the cur >= prev check is vacuous -- looks like a
// copy-paste from Big1; the meaningful assertions are num and numFiles.
class Big2 {
public:
    void run() {
        const int total = 100000;
        BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , total * 2 );
        for ( int i=0; i<total; i++ ) {
            sorter.add( BSON( "a" << "b" ) , 5 , i );
        }

        sorter.sort();

        auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
        int num=0;
        double prev = 0;
        while ( i->more() ) {
            pair<BSONObj,DiskLoc> p = i->next();
            num++;
            double cur = p.first["x"].number();
            assert( cur >= prev );
            prev = cur;
        }
        assert( num == total );
        ASSERT( sorter.numFiles() > 2 ); // data spilled to disk
    }
};
+
// Many identical null keys with differing DiskLocs: just drain the
// iterator to make sure duplicate-key handling doesn't crash.
class D1 {
public:
    void run() {

        BSONObjBuilder b;
        b.appendNull("");
        BSONObj x = b.obj(); // { "": null }

        BSONObjExternalSorter sorter(indexInterfaceForTheseTests);
        sorter.add(x, DiskLoc(3,7));
        sorter.add(x, DiskLoc(4,7));
        sorter.add(x, DiskLoc(2,7));
        sorter.add(x, DiskLoc(1,7));
        sorter.add(x, DiskLoc(3,77));

        sorter.sort();

        auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
        while( i->more() ) {
            BSONObjExternalSorter::Data d = i->next();
            /*cout << d.second.toString() << endl;
            cout << d.first.objsize() << endl;
            cout<<"SORTER next:" << d.first.toString() << endl;*/
        }
    }
};
+ }
+
// Golden-value guard for the binary BSON format: each encoding's total
// size and md5 are pinned, so any change to the wire layout breaks this
// test immediately.
class CompatBSON {
public:

// assert both the encoded size and the md5 of the raw bytes
#define JSONBSONTEST(j,s,m) ASSERT_EQUALS( fromjson( j ).objsize() , s ); ASSERT_EQUALS( fromjson( j ).md5() , m );
#define RAWBSONTEST(j,s,m) ASSERT_EQUALS( j.objsize() , s ); ASSERT_EQUALS( j.md5() , m );

    void run() {

        JSONBSONTEST( "{ 'x' : true }" , 9 , "6fe24623e4efc5cf07f027f9c66b5456" );
        JSONBSONTEST( "{ 'x' : null }" , 8 , "12d43430ff6729af501faf0638e68888" );
        JSONBSONTEST( "{ 'x' : 5.2 }" , 16 , "aaeeac4a58e9c30eec6b0b0319d0dff2" );
        JSONBSONTEST( "{ 'x' : 'eliot' }" , 18 , "331a3b8b7cbbe0706c80acdb45d4ebbe" );
        JSONBSONTEST( "{ 'x' : 5.2 , 'y' : 'truth' , 'z' : 1.1 }" , 40 , "7c77b3a6e63e2f988ede92624409da58" );
        JSONBSONTEST( "{ 'a' : { 'b' : 1.1 } }" , 24 , "31887a4b9d55cd9f17752d6a8a45d51f" );
        JSONBSONTEST( "{ 'x' : 5.2 , 'y' : { 'a' : 'eliot' , b : true } , 'z' : null }" , 44 , "b3de8a0739ab329e7aea138d87235205" );
        JSONBSONTEST( "{ 'x' : 5.2 , 'y' : [ 'a' , 'eliot' , 'b' , true ] , 'z' : null }" , 62 , "cb7bad5697714ba0cbf51d113b6a0ee8" );

        RAWBSONTEST( BSON( "x" << 4 ) , 12 , "d1ed8dbf79b78fa215e2ded74548d89d" );

    }
};
+
+ class CompareDottedFieldNamesTest {
+ public:
+ void t( FieldCompareResult res , const string& l , const string& r ) {
+ ASSERT_EQUALS( res , compareDottedFieldNames( l , r ) );
+ ASSERT_EQUALS( -1 * res , compareDottedFieldNames( r , l ) );
+ }
+
+ void run() {
+ t( SAME , "x" , "x" );
+ t( SAME , "x.a" , "x.a" );
+ t( LEFT_BEFORE , "a" , "b" );
+ t( RIGHT_BEFORE , "b" , "a" );
+
+ t( LEFT_SUBFIELD , "a.x" , "a" );
+ }
+ };
+
+ struct NestedDottedConversions {
+ void t(const BSONObj& nest, const BSONObj& dot) {
+ ASSERT_EQUALS( nested2dotted(nest), dot);
+ ASSERT_EQUALS( nest, dotted2nested(dot));
+ }
+
+ void run() {
+ t( BSON("a" << BSON("b" << 1)), BSON("a.b" << 1) );
+ t( BSON("a" << BSON("b" << 1 << "c" << 1)), BSON("a.b" << 1 << "a.c" << 1) );
+ t( BSON("a" << BSON("b" << 1 << "c" << 1) << "d" << 1), BSON("a.b" << 1 << "a.c" << 1 << "d" << 1) );
+ t( BSON("a" << BSON("b" << 1 << "c" << 1 << "e" << BSON("f" << 1)) << "d" << 1), BSON("a.b" << 1 << "a.c" << 1 << "a.e.f" << 1 << "d" << 1) );
+ }
+ };
+
+ struct BSONArrayBuilderTest {
+ void run() {
+ int i = 0;
+ BSONObjBuilder objb;
+ BSONArrayBuilder arrb;
+
+ objb << objb.numStr(i++) << 100;
+ arrb << 100;
+
+ objb << objb.numStr(i++) << 1.0;
+ arrb << 1.0;
+
+ objb << objb.numStr(i++) << "Hello";
+ arrb << "Hello";
+
+ objb << objb.numStr(i++) << string("World");
+ arrb << string("World");
+
+ objb << objb.numStr(i++) << BSON( "a" << 1 << "b" << "foo" );
+ arrb << BSON( "a" << 1 << "b" << "foo" );
+
+ objb << objb.numStr(i++) << BSON( "a" << 1)["a"];
+ arrb << BSON( "a" << 1)["a"];
+
+ OID oid;
+ oid.init();
+ objb << objb.numStr(i++) << oid;
+ arrb << oid;
+
+ BSONObj obj = objb.obj();
+ BSONArray arr = arrb.arr();
+
+ ASSERT_EQUALS(obj, arr);
+
+ BSONObj o = BSON( "obj" << obj << "arr" << arr << "arr2" << BSONArray(obj) );
+ keyTest(o);
+
+ ASSERT_EQUALS(o["obj"].type(), Object);
+ ASSERT_EQUALS(o["arr"].type(), Array);
+ ASSERT_EQUALS(o["arr2"].type(), Array);
+ }
+ };
+
+ struct ArrayMacroTest {
+ void run() {
+ BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+ BSONObj obj = BSON( "0" << "hello"
+ << "1" << 1
+ << "2" << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+
+ ASSERT_EQUALS(arr, obj);
+ ASSERT_EQUALS(arr["2"].type(), Object);
+ ASSERT_EQUALS(arr["2"].embeddedObject()["foo"].type(), Array);
+ }
+ };
+
+ class NumberParsing {
+ public:
+ void run() {
+ BSONObjBuilder a;
+ BSONObjBuilder b;
+
+ a.append( "a" , (int)1 );
+ ASSERT( b.appendAsNumber( "a" , "1" ) );
+
+ a.append( "b" , 1.1 );
+ ASSERT( b.appendAsNumber( "b" , "1.1" ) );
+
+ a.append( "c" , (int)-1 );
+ ASSERT( b.appendAsNumber( "c" , "-1" ) );
+
+ a.append( "d" , -1.1 );
+ ASSERT( b.appendAsNumber( "d" , "-1.1" ) );
+
+ a.append( "e" , (long long)32131231231232313LL );
+ ASSERT( b.appendAsNumber( "e" , "32131231231232313" ) );
+
+ ASSERT( ! b.appendAsNumber( "f" , "zz" ) );
+ ASSERT( ! b.appendAsNumber( "f" , "5zz" ) );
+ ASSERT( ! b.appendAsNumber( "f" , "zz5" ) );
+
+ ASSERT_EQUALS( a.obj() , b.obj() );
+ }
+ };
+
+ class bson2settest {
+ public:
+ void run() {
+ BSONObj o = BSON( "z" << 1 << "a" << 2 << "m" << 3 << "c" << 4 );
+ BSONObjIteratorSorted i( o );
+ stringstream ss;
+ while ( i.more() )
+ ss << i.next().fieldName();
+ ASSERT_EQUALS( "acmz" , ss.str() );
+
+ {
+ Timer t;
+ for ( int i=0; i<10000; i++ ) {
+ BSONObjIteratorSorted j( o );
+ int l = 0;
+ while ( j.more() )
+ l += strlen( j.next().fieldName() );
+ }
+ //unsigned long long tm = t.micros();
+ //cout << "time: " << tm << endl;
+ }
+ }
+
+ };
+
+ class checkForStorageTests {
+ public:
+
+ void good( string s ) {
+ BSONObj o = fromjson( s );
+ if ( o.okForStorage() )
+ return;
+ throw UserException( 12528 , (string)"should be ok for storage:" + s );
+ }
+
+ void bad( string s ) {
+ BSONObj o = fromjson( s );
+ if ( ! o.okForStorage() )
+ return;
+ throw UserException( 12529 , (string)"should NOT be ok for storage:" + s );
+ }
+
+ void run() {
+ good( "{x:1}" );
+ bad( "{'x.y':1}" );
+
+ good( "{x:{a:2}}" );
+ bad( "{x:{'$a':2}}" );
+ }
+ };
+
+ class InvalidIDFind {
+ public:
+ void run() {
+ BSONObj x = BSON( "_id" << 5 << "t" << 2 );
+ {
+ char * crap = (char*)malloc( x.objsize() );
+ memcpy( crap , x.objdata() , x.objsize() );
+ BSONObj y( crap );
+ ASSERT_EQUALS( x , y );
+ free( crap );
+ }
+
+ {
+ char * crap = (char*)malloc( x.objsize() );
+ memcpy( crap , x.objdata() , x.objsize() );
+ int * foo = (int*)crap;
+ foo[0] = 123123123;
+ int state = 0;
+ try {
+ BSONObj y( crap );
+ state = 1;
+ }
+ catch ( std::exception& e ) {
+ state = 2;
+ ASSERT( strstr( e.what() , "_id: 5" ) > 0 );
+ }
+ free( crap );
+ ASSERT_EQUALS( 2 , state );
+ }
+
+
+ }
+ };
+
+ class ElementSetTest {
+ public:
+ void run() {
+ BSONObj x = BSON( "a" << 1 << "b" << 1 << "c" << 2 );
+ BSONElement a = x["a"];
+ BSONElement b = x["b"];
+ BSONElement c = x["c"];
+ //cout << "c: " << c << endl;
+ ASSERT( a.woCompare( b ) != 0 );
+ ASSERT( a.woCompare( b , false ) == 0 );
+
+ BSONElementSet s;
+ s.insert( a );
+ ASSERT_EQUALS( 1U , s.size() );
+ s.insert( b );
+ ASSERT_EQUALS( 1U , s.size() );
+ ASSERT( ! s.count( c ) );
+
+ ASSERT( s.find( a ) != s.end() );
+ ASSERT( s.find( b ) != s.end() );
+ ASSERT( s.find( c ) == s.end() );
+
+
+ s.insert( c );
+ ASSERT_EQUALS( 2U , s.size() );
+
+
+ ASSERT( s.find( a ) != s.end() );
+ ASSERT( s.find( b ) != s.end() );
+ ASSERT( s.find( c ) != s.end() );
+
+ ASSERT( s.count( a ) );
+ ASSERT( s.count( b ) );
+ ASSERT( s.count( c ) );
+
+ {
+ BSONElementSet x;
+ BSONObj o = fromjson( "{ 'a' : [ 1 , 2 , 1 ] }" );
+ BSONObjIterator i( o["a"].embeddedObjectUserCheck() );
+ while ( i.more() ) {
+ x.insert( i.next() );
+ }
+ ASSERT_EQUALS( 2U , x.size() );
+ }
+ }
+ };
+
+ class EmbeddedNumbers {
+ public:
+ void run() {
+ BSONObj x = BSON( "a" << BSON( "b" << 1 ) );
+ BSONObj y = BSON( "a" << BSON( "b" << 1.0 ) );
+ keyTest(x); keyTest(y);
+ ASSERT_EQUALS( x , y );
+ ASSERT_EQUALS( 0 , x.woCompare( y ) );
+ }
+ };
+
+ class BuilderPartialItearte {
+ public:
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.append( "x" , 1 );
+ b.append( "y" , 2 );
+
+ BSONObjIterator i = b.iterator();
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 1 , i.next().numberInt() );
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 2 , i.next().numberInt() );
+ ASSERT( ! i.more() );
+
+ b.append( "z" , 3 );
+
+ i = b.iterator();
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 1 , i.next().numberInt() );
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 2 , i.next().numberInt() );
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 3 , i.next().numberInt() );
+ ASSERT( ! i.more() );
+
+ ASSERT_EQUALS( BSON( "x" << 1 << "y" << 2 << "z" << 3 ) , b.obj() );
+ }
+
+ }
+ };
+
+ class BSONFieldTests {
+ public:
+ void run() {
+ {
+ BSONField<int> x("x");
+ BSONObj o = BSON( x << 5 );
+ ASSERT_EQUALS( BSON( "x" << 5 ) , o );
+ }
+
+ {
+ BSONField<int> x("x");
+ BSONObj o = BSON( x.make(5) );
+ ASSERT_EQUALS( BSON( "x" << 5 ) , o );
+ }
+
+ {
+ BSONField<int> x("x");
+ BSONObj o = BSON( x(5) );
+ ASSERT_EQUALS( BSON( "x" << 5 ) , o );
+
+ o = BSON( x.gt(5) );
+ ASSERT_EQUALS( BSON( "x" << BSON( "$gt" << 5 ) ) , o );
+ }
+
+ }
+ };
+
+ class BSONForEachTest {
+ public:
+ void run() {
+ BSONObj obj = BSON("a" << 1 << "a" << 2 << "a" << 3);
+
+ int count = 0;
+ BSONForEach(e, obj) {
+ ASSERT_EQUALS( e.fieldName() , string("a") );
+ count += e.Int();
+ }
+
+ ASSERT_EQUALS( count , 1+2+3 );
+ }
+ };
+
+ class StringDataTest {
+ public:
+ void run() {
+ StringData a( string( "aaa" ) );
+ ASSERT_EQUALS( 3u , a.size() );
+
+ StringData b( string( "bbb" ).c_str() );
+ ASSERT_EQUALS( 3u , b.size() );
+
+ StringData c( "ccc", StringData::LiteralTag() );
+ ASSERT_EQUALS( 3u , c.size() );
+
+ // TODO update test when second parm takes StringData too
+ BSONObjBuilder builder;
+ builder.append( c, "value");
+ ASSERT_EQUALS( builder.obj() , BSON( c.data() << "value" ) );
+
+ }
+ };
+
+ class CompareOps {
+ public:
+ void run() {
+
+ BSONObj a = BSON("a"<<1);
+ BSONObj b = BSON("a"<<1);
+ BSONObj c = BSON("a"<<2);
+ BSONObj d = BSON("a"<<3);
+ BSONObj e = BSON("a"<<4);
+ BSONObj f = BSON("a"<<4);
+
+ ASSERT( ! ( a < b ) );
+ ASSERT( a <= b );
+ ASSERT( a < c );
+
+ ASSERT( f > d );
+ ASSERT( f >= e );
+ ASSERT( ! ( f > e ) );
+ }
+ };
+
+ class HashingTest {
+ public:
+ void run() {
+ int N = 100000;
+ BSONObj x = BSON( "name" << "eliot was here"
+ << "x" << 5
+ << "asdasdasdas" << "asldkasldjasldjasldjlasjdlasjdlasdasdasdasdasdasdasd" );
+
+ {
+ //Timer t;
+ for ( int i=0; i<N; i++ )
+ x.md5();
+ //int millis = t.millis();
+ //cout << "md5 : " << millis << endl;
+ }
+
+ {
+ //Timer t;
+ for ( int i=0; i<N; i++ )
+ x.toString();
+ //int millis = t.millis();
+ //cout << "toString : " << millis << endl;
+ }
+
+ {
+ //Timer t;
+ for ( int i=0; i<N; i++ )
+ checksum( x.objdata() , x.objsize() );
+ //int millis = t.millis();
+ //cout << "checksum : " << millis << endl;
+ }
+
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "jsobj" ) {
+ }
+
+ void setupTests() {
+ add< BufBuilderBasic >();
+ add< BSONElementBasic >();
+ add< BSONObjTests::NullString >();
+ add< BSONObjTests::Create >();
+ add< BSONObjTests::WoCompareBasic >();
+ add< BSONObjTests::NumericCompareBasic >();
+ add< BSONObjTests::WoCompareEmbeddedObject >();
+ add< BSONObjTests::WoCompareEmbeddedArray >();
+ add< BSONObjTests::WoCompareOrdered >();
+ add< BSONObjTests::WoCompareDifferentLength >();
+ add< BSONObjTests::WoSortOrder >();
+ add< BSONObjTests::MultiKeySortOrder > ();
+ add< BSONObjTests::TimestampTest >();
+ add< BSONObjTests::Nan >();
+ add< BSONObjTests::AsTempObj >();
+ add< BSONObjTests::AppendIntOrLL >();
+ add< BSONObjTests::AppendNumber >();
+ add< BSONObjTests::ToStringArray >();
+ add< BSONObjTests::ToStringNumber >();
+ add< BSONObjTests::AppendAs >();
+ add< BSONObjTests::ArrayAppendAs >();
+ add< BSONObjTests::GetField >();
+
+ add< BSONObjTests::Validation::BadType >();
+ add< BSONObjTests::Validation::EooBeforeEnd >();
+ add< BSONObjTests::Validation::Undefined >();
+ add< BSONObjTests::Validation::TotalSizeTooSmall >();
+ add< BSONObjTests::Validation::EooMissing >();
+ add< BSONObjTests::Validation::WrongStringSize >();
+ add< BSONObjTests::Validation::ZeroStringSize >();
+ add< BSONObjTests::Validation::NegativeStringSize >();
+ add< BSONObjTests::Validation::WrongSubobjectSize >();
+ add< BSONObjTests::Validation::WrongDbrefNsSize >();
+ add< BSONObjTests::Validation::NoFieldNameEnd >();
+ add< BSONObjTests::Validation::BadRegex >();
+ add< BSONObjTests::Validation::BadRegexOptions >();
+ add< BSONObjTests::Validation::CodeWScopeSmallSize >();
+ add< BSONObjTests::Validation::CodeWScopeZeroStrSize >();
+ add< BSONObjTests::Validation::CodeWScopeSmallStrSize >();
+ add< BSONObjTests::Validation::CodeWScopeNoSizeForObj >();
+ add< BSONObjTests::Validation::CodeWScopeSmallObjSize >();
+ add< BSONObjTests::Validation::CodeWScopeBadObject >();
+ add< BSONObjTests::Validation::NoSize >( Symbol );
+ add< BSONObjTests::Validation::NoSize >( Code );
+ add< BSONObjTests::Validation::NoSize >( String );
+ add< BSONObjTests::Validation::NoSize >( CodeWScope );
+ add< BSONObjTests::Validation::NoSize >( DBRef );
+ add< BSONObjTests::Validation::NoSize >( Object );
+ add< BSONObjTests::Validation::NoSize >( Array );
+ add< BSONObjTests::Validation::NoSize >( BinData );
+ add< BSONObjTests::Validation::Fuzz >( .5 );
+ add< BSONObjTests::Validation::Fuzz >( .1 );
+ add< BSONObjTests::Validation::Fuzz >( .05 );
+ add< BSONObjTests::Validation::Fuzz >( .01 );
+ add< BSONObjTests::Validation::Fuzz >( .001 );
+ add< OIDTests::init1 >();
+ add< OIDTests::initParse1 >();
+ add< OIDTests::append >();
+ add< OIDTests::increasing >();
+ add< OIDTests::ToDate >();
+ add< OIDTests::FromDate >();
+ add< ValueStreamTests::LabelBasic >();
+ add< ValueStreamTests::LabelShares >();
+ add< ValueStreamTests::LabelDouble >();
+ add< ValueStreamTests::LabelDoubleShares >();
+ add< ValueStreamTests::LabelSize >();
+ add< ValueStreamTests::LabelMulti >();
+ add< ValueStreamTests::LabelishOr >();
+ add< ValueStreamTests::Unallowed >();
+ add< ValueStreamTests::ElementAppend >();
+ add< ValueStreamTests::Unallowed >();
+ add< ValueStreamTests::ElementAppend >();
+ add< SubObjectBuilder >();
+ add< DateBuilder >();
+ add< DateNowBuilder >();
+ add< TimeTBuilder >();
+ add< MinMaxKeyBuilder >();
+ add< MinMaxElementTest >();
+ add< ComparatorTest >();
+ add< ExtractFieldsTest >();
+ add< external_sort::Basic1 >();
+ add< external_sort::Basic2 >();
+ add< external_sort::Basic3 >();
+ add< external_sort::ByDiskLock >();
+ add< external_sort::Big1 >();
+ add< external_sort::Big2 >();
+ add< external_sort::D1 >();
+ add< CompatBSON >();
+ add< CompareDottedFieldNamesTest >();
+ add< NestedDottedConversions >();
+ add< BSONArrayBuilderTest >();
+ add< ArrayMacroTest >();
+ add< NumberParsing >();
+ add< bson2settest >();
+ add< checkForStorageTests >();
+ add< InvalidIDFind >();
+ add< ElementSetTest >();
+ add< EmbeddedNumbers >();
+ add< BuilderPartialItearte >();
+ add< BSONFieldTests >();
+ add< BSONForEachTest >();
+ add< StringDataTest >();
+ add< CompareOps >();
+ add< HashingTest >();
+ }
+ } myall;
+
+} // namespace JsobjTests
+
diff --git a/src/mongo/dbtests/jsontests.cpp b/src/mongo/dbtests/jsontests.cpp
new file mode 100644
index 00000000000..36c204a1011
--- /dev/null
+++ b/src/mongo/dbtests/jsontests.cpp
@@ -0,0 +1,1185 @@
+// jsontests.cpp - Tests for json.{h,cpp} code and BSONObj::jsonString()
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/jsobj.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+
+#include <limits>
+
+namespace JsonTests {
+ namespace JsonStringTests {
+
+ class Empty {
+ public:
+ void run() {
+ ASSERT_EQUALS( "{}", BSONObj().jsonString( Strict ) );
+ }
+ };
+
+ class SingleStringMember {
+ public:
+ void run() {
+ ASSERT_EQUALS( "{ \"a\" : \"b\" }", BSON( "a" << "b" ).jsonString( Strict ) );
+ }
+ };
+
+ class EscapedCharacters {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "\" \\ / \b \f \n \r \t" );
+ ASSERT_EQUALS( "{ \"a\" : \"\\\" \\\\ / \\b \\f \\n \\r \\t\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ // per http://www.ietf.org/rfc/rfc4627.txt, control characters are
+ // (U+0000 through U+001F). U+007F is not mentioned as a control character.
+ class AdditionalControlCharacters {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "\x1 \x1f" );
+ ASSERT_EQUALS( "{ \"a\" : \"\\u0001 \\u001f\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class ExtendedAscii {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "\x80" );
+ ASSERT_EQUALS( "{ \"a\" : \"\x80\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class EscapeFieldName {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "\t", "b" );
+ ASSERT_EQUALS( "{ \"\\t\" : \"b\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleIntMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ ASSERT_EQUALS( "{ \"a\" : 1 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleNumberMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 1.5 );
+ ASSERT_EQUALS( "{ \"a\" : 1.5 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class InvalidNumbers {
+ public:
+ void run() {
+ BSONObjBuilder c;
+ c.append( "a", numeric_limits< double >::quiet_NaN() );
+ string s = c.done().jsonString( Strict );
+ // Note there is no NaN in the JSON RFC but what would be the alternative?
+ ASSERT( str::contains(s, "NaN") );
+
+ // commented out assertion as it doesn't throw anymore:
+ //ASSERT_THROWS( c.done().jsonString( Strict ), AssertionException );
+
+ BSONObjBuilder d;
+ d.append( "a", numeric_limits< double >::signaling_NaN() );
+ //ASSERT_THROWS( d.done().jsonString( Strict ), AssertionException );
+ s = d.done().jsonString( Strict );
+ ASSERT( str::contains(s, "NaN") );
+ }
+ };
+
+ class NumberPrecision {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 123456789 );
+ ASSERT_EQUALS( "{ \"a\" : 123456789 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class NegativeNumber {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", -1 );
+ ASSERT_EQUALS( "{ \"a\" : -1 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleBoolMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendBool( "a", true );
+ ASSERT_EQUALS( "{ \"a\" : true }", b.done().jsonString( Strict ) );
+
+ BSONObjBuilder c;
+ c.appendBool( "a", false );
+ ASSERT_EQUALS( "{ \"a\" : false }", c.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleNullMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendNull( "a" );
+ ASSERT_EQUALS( "{ \"a\" : null }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleObjectMember {
+ public:
+ void run() {
+ BSONObjBuilder b, c;
+ b.append( "a", c.done() );
+ ASSERT_EQUALS( "{ \"a\" : {} }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class TwoMembers {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ b.append( "b", 2 );
+ ASSERT_EQUALS( "{ \"a\" : 1, \"b\" : 2 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class EmptyArray {
+ public:
+ void run() {
+ vector< int > arr;
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ ASSERT_EQUALS( "{ \"a\" : [] }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class Array {
+ public:
+ void run() {
+ vector< int > arr;
+ arr.push_back( 1 );
+ arr.push_back( 2 );
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ ASSERT_EQUALS( "{ \"a\" : [ 1, 2 ] }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class DBRef {
+ public:
+ void run() {
+ OID oid;
+ memset( &oid, 0xff, 12 );
+ BSONObjBuilder b;
+ b.appendDBRef( "a", "namespace", oid );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
+ built.jsonString( JS ) );
+ ASSERT_EQUALS( "{ \"a\" : Dbref( \"namespace\", \"ffffffffffffffffffffffff\" ) }",
+ built.jsonString( TenGen ) );
+ }
+ };
+
+ class DBRefZero {
+ public:
+ void run() {
+ OID oid;
+ memset( &oid, 0, 12 );
+ BSONObjBuilder b;
+ b.appendDBRef( "a", "namespace", oid );
+ ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"000000000000000000000000\" } }",
+ b.done().jsonString( Strict ) );
+ }
+ };
+
+ class ObjectId {
+ public:
+ void run() {
+ OID oid;
+ memset( &oid, 0xff, 12 );
+ BSONObjBuilder b;
+ b.appendOID( "a", &oid );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$oid\" : \"ffffffffffffffffffffffff\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : ObjectId( \"ffffffffffffffffffffffff\" ) }",
+ built.jsonString( TenGen ) );
+ }
+ };
+
+ class BinData {
+ public:
+ void run() {
+ char z[ 3 ];
+ z[ 0 ] = 'a';
+ z[ 1 ] = 'b';
+ z[ 2 ] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData( "a", 3, BinDataGeneral, z );
+
+ string o = b.done().jsonString( Strict );
+
+ ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"00\" } }",
+ o );
+
+ BSONObjBuilder c;
+ c.appendBinData( "a", 2, BinDataGeneral, z );
+ ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"00\" } }",
+ c.done().jsonString( Strict ) );
+
+ BSONObjBuilder d;
+ d.appendBinData( "a", 1, BinDataGeneral, z );
+ ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"00\" } }",
+ d.done().jsonString( Strict ) );
+ }
+ };
+
+ class Symbol {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendSymbol( "a", "b" );
+ ASSERT_EQUALS( "{ \"a\" : \"b\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class Date {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendDate( "a", 0 );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$date\" : 0 } }", built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", built.jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", built.jsonString( JS ) );
+ }
+ };
+
+ class Regex {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "abc", "i" );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"abc\", \"$options\" : \"i\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : /abc/i }", built.jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : /abc/i }", built.jsonString( JS ) );
+ }
+ };
+
+ class RegexEscape {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "/\"", "i" );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"/\\\"\", \"$options\" : \"i\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( JS ) );
+ }
+ };
+
+ class RegexManyOptions {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "z", "abcgimx" );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"z\", \"$options\" : \"abcgimx\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : /z/gim }", built.jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : /z/gim }", built.jsonString( JS ) );
+ }
+ };
+
+ class CodeTests {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendCode( "x" , "function(){ return 1; }" );
+ BSONObj o = b.obj();
+ ASSERT_EQUALS( "{ \"x\" : function(){ return 1; } }" , o.jsonString() );
+ }
+ };
+
+ class TimestampTests {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendTimestamp( "x" , 4000 , 10 );
+ BSONObj o = b.obj();
+ ASSERT_EQUALS( "{ \"x\" : { \"t\" : 4000 , \"i\" : 10 } }" , o.jsonString() );
+ }
+ };
+
+ class NullString {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "x" , "a\0b" , 4 );
+ BSONObj o = b.obj();
+ ASSERT_EQUALS( "{ \"x\" : \"a\\u0000b\" }" , o.jsonString() );
+ }
+ };
+
+ class AllTypes {
+ public:
+ void run() {
+ OID oid;
+ oid.init();
+
+ BSONObjBuilder b;
+ b.appendMinKey( "a" );
+ b.append( "b" , 5.5 );
+ b.append( "c" , "abc" );
+ b.append( "e" , BSON( "x" << 1 ) );
+ b.append( "f" , BSON_ARRAY( 1 << 2 << 3 ) );
+ b.appendBinData( "g" , 5 , bdtCustom , (const char*)this );
+ b.appendUndefined( "h" );
+ b.append( "i" , oid );
+ b.appendBool( "j" , 1 );
+ b.appendDate( "k" , 123 );
+ b.appendNull( "l" );
+ b.appendRegex( "m" , "a" );
+ b.appendDBRef( "n" , "foo" , oid );
+ b.appendCode( "o" , "function(){}" );
+ b.appendSymbol( "p" , "foo" );
+ b.appendCodeWScope( "q" , "function(){}" , BSON("x" << 1 ) );
+ b.append( "r" , (int)5 );
+ b.appendTimestamp( "s" , 123123123123123LL );
+ b.append( "t" , 12321312312LL );
+ b.appendMaxKey( "u" );
+
+ BSONObj o = b.obj();
+ o.jsonString();
+ //cout << o.jsonString() << endl;
+ }
+ };
+
+ } // namespace JsonStringTests
+
+ namespace FromJsonTests {
+
+ class Base {
+ public:
+ virtual ~Base() {}
+ void run() {
+ ASSERT( fromjson( json() ).valid() );
+ assertEquals( bson(), fromjson( json() ) );
+ assertEquals( bson(), fromjson( bson().jsonString( Strict ) ) );
+ assertEquals( bson(), fromjson( bson().jsonString( TenGen ) ) );
+ assertEquals( bson(), fromjson( bson().jsonString( JS ) ) );
+ }
+ protected:
+ virtual BSONObj bson() const = 0;
+ virtual string json() const = 0;
+ private:
+ static void assertEquals( const BSONObj &expected, const BSONObj &actual ) {
+ if ( expected.woCompare( actual ) ) {
+ out() << "want:" << expected.jsonString() << " size: " << expected.objsize() << endl;
+ out() << "got :" << actual.jsonString() << " size: " << actual.objsize() << endl;
+ out() << expected.hexDump() << endl;
+ out() << actual.hexDump() << endl;
+ }
+ ASSERT( !expected.woCompare( actual ) );
+ }
+ };
+
+ class Bad {
+ public:
+ virtual ~Bad() {}
+ void run() {
+ ASSERT_THROWS( fromjson( json() ), MsgAssertionException );
+ }
+ protected:
+ virtual string json() const = 0;
+ };
+
+ class Empty : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{}";
+ }
+ };
+
+ class EmptyWithSpace : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ }";
+ }
+ };
+
+ class SingleString : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "b" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"b\" }";
+ }
+ };
+
+ class EmptyStrings : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "", "" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"\" : \"\" }";
+ }
+ };
+
+ class ReservedFieldName : public Bad {
+ virtual string json() const {
+ return "{ \"$oid\" : \"b\" }";
+ }
+ };
+
+ class OkDollarFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "$where", 1 );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"$where\" : 1 }";
+ }
+ };
+
+ class SingleNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1 }";
+ }
+ };
+
+ class RealNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", strtod( "0.7", 0 ) );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 0.7 }";
+ }
+ };
+
+ class FancyNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", strtod( "-4.4433e-2", 0 ) );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : -4.4433e-2 }";
+ }
+ };
+
+ class TwoElements : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ b.append( "b", "foo" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1, \"b\" : \"foo\" }";
+ }
+ };
+
+ class Subobject : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ BSONObjBuilder c;
+ c.append( "z", b.done() );
+ return c.obj();
+ }
+ virtual string json() const {
+ return "{ \"z\" : { \"a\" : 1 } }";
+ }
+ };
+
+ class ArrayEmpty : public Base {
+ virtual BSONObj bson() const {
+ vector< int > arr;
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [] }";
+ }
+ };
+
+ class Array : public Base {
+ virtual BSONObj bson() const {
+ vector< int > arr;
+ arr.push_back( 1 );
+ arr.push_back( 2 );
+ arr.push_back( 3 );
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [ 1, 2, 3 ] }";
+ }
+ };
+
+ class True : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool( "a", true );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : true }";
+ }
+ };
+
+ class False : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool( "a", false );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : false }";
+ }
+ };
+
+ class Null : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNull( "a" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : null }";
+ }
+ };
+
+ class EscapedCharacters : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "\" \\ / \b \f \n \r \t \v" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t \\v\" }";
+ }
+ };
+
+ class NonEscapedCharacters : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "% { a z $ # ' " );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\% \\{ \\a \\z \\$ \\# \\' \\ \" }";
+ }
+ };
+
+ class AllowedControlCharacter : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "\x7f" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\x7f\" }";
+ }
+ };
+
+ class EscapeFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "\n", "b" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"\\n\" : \"b\" }";
+ }
+ };
+
+ class EscapedUnicodeToUtf8 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[ 7 ];
+ u[ 0 ] = 0xe0 | 0x0a;
+ u[ 1 ] = 0x80;
+ u[ 2 ] = 0x80;
+ u[ 3 ] = 0xe0 | 0x0a;
+ u[ 4 ] = 0x80;
+ u[ 5 ] = 0x80;
+ u[ 6 ] = 0;
+ b.append( "a", (char *) u );
+ BSONObj built = b.obj();
+ ASSERT_EQUALS( string( (char *) u ), built.firstElement().valuestr() );
+ return built;
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\ua000\\uA000\" }";
+ }
+ };
+
+ class Utf8AllOnes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[ 8 ];
+ u[ 0 ] = 0x01;
+
+ u[ 1 ] = 0x7f;
+
+ u[ 2 ] = 0xdf;
+ u[ 3 ] = 0xbf;
+
+ u[ 4 ] = 0xef;
+ u[ 5 ] = 0xbf;
+ u[ 6 ] = 0xbf;
+
+ u[ 7 ] = 0;
+
+ b.append( "a", (char *) u );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0001\\u007f\\u07ff\\uffff\" }";
+ }
+ };
+
+ class Utf8FirstByteOnes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[ 6 ];
+ u[ 0 ] = 0xdc;
+ u[ 1 ] = 0x80;
+
+ u[ 2 ] = 0xef;
+ u[ 3 ] = 0xbc;
+ u[ 4 ] = 0x80;
+
+ u[ 5 ] = 0;
+
+ b.append( "a", (char *) u );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0700\\uff00\" }";
+ }
+ };
+
+ class DBRef : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset( &o, 0, 12 );
+ b.appendDBRef( "a", "foo", o );
+ return b.obj();
+ }
+ // NOTE Testing other formats handled by by Base class.
+ virtual string json() const {
+ return "{ \"a\" : { \"$ref\" : \"foo\", \"$id\" : \"000000000000000000000000\" } }";
+ }
+ };
+
+ class NewDBRef : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset( &o, 0, 12 );
+ b.append( "$ref", "items" );
+ b.appendOID( "$id", &o );
+ BSONObjBuilder c;
+ c.append( "refval", b.done() );
+ return c.obj();
+ }
+ virtual string json() const {
+ return "{ \"refval\" : { \"$ref\" : \"items\", \"$id\" : ObjectId( \"000000000000000000000000\" ) } }";
+ }
+ };
+
+ class Oid : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendOID( "_id" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"_id\" : { \"$oid\" : \"000000000000000000000000\" } }";
+ }
+ };
+
+        // The ObjectId( "..." ) constructor syntax parses to a non-zero OID.
+        class Oid2 : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                OID o;
+                memset( &o, 0x0f, 12 );
+                b.appendOID( "_id", &o );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0f0f0f0f0f0f0f\" ) }";
+            }
+        };
+
+        // A 24-hex-character _id given as a bare string stays a string -- it is
+        // not promoted to an ObjectId.
+        class StringId : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.append("_id", "000000000000000000000000");
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"_id\" : \"000000000000000000000000\" }";
+            }
+        };
+
+        // { $binary, $type } parses base64 "YWJj" back to the bytes "abc"
+        // (no base64 padding needed for a 3-byte payload).
+        class BinData : public Base {
+            virtual BSONObj bson() const {
+                char z[ 3 ];
+                z[ 0 ] = 'a';
+                z[ 1 ] = 'b';
+                z[ 2 ] = 'c';
+                BSONObjBuilder b;
+                b.appendBinData( "a", 3, BinDataGeneral, z );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"00\" } }";
+            }
+        };
+
+        // Base64 input with one '=' padding character (2-byte payload).
+        class BinDataPaddedSingle : public Base {
+            virtual BSONObj bson() const {
+                char z[ 2 ];
+                z[ 0 ] = 'a';
+                z[ 1 ] = 'b';
+                BSONObjBuilder b;
+                b.appendBinData( "a", 2, BinDataGeneral, z );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"00\" } }";
+            }
+        };
+
+        // Base64 input with two '=' padding characters (1-byte payload).
+        class BinDataPaddedDouble : public Base {
+            virtual BSONObj bson() const {
+                char z[ 1 ];
+                z[ 0 ] = 'a';
+                BSONObjBuilder b;
+                b.appendBinData( "a", 1, BinDataGeneral, z );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"00\" } }";
+            }
+        };
+
+        // Exercises the full 64-character base64 alphabet: the 48 bytes below
+        // decode from "ABC...xyz0123456789+/" exactly once per alphabet symbol.
+        class BinDataAllChars : public Base {
+            virtual BSONObj bson() const {
+                unsigned char z[] = {
+                    0x00, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
+                    0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
+                    0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
+                    0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
+                    0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF
+                };
+                BSONObjBuilder b;
+                b.appendBinData( "a", 48, BinDataGeneral, z );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"a\" : { \"$binary\" : \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\", \"$type\" : \"00\" } }";
+            }
+        };
+
+        // { $date : 0 } parses to the epoch date.
+        class Date : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.appendDate( "a", 0 );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"a\" : { \"$date\" : 0 } }";
+            }
+        };
+
+        // A nonzero millisecond $date value round trips.
+        class DateNonzero : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.appendDate( "a", 100 );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"a\" : { \"$date\" : 100 } }";
+            }
+        };
+
+        // Expects a parse failure for an out-of-range $date value.
+        // NOTE(review): ~(0LL) streams as -1, so the literal built here is
+        // "-10", which is not actually over-long; the test presumably fails
+        // parsing because of the negative value. ~(0ULL) (max unsigned plus a
+        // trailing digit) may have been intended -- confirm against the
+        // json parser before changing.
+        class DateTooLong : public Bad {
+            virtual string json() const {
+                stringstream ss;
+                ss << "{ \"a\" : { \"$date\" : " << ~(0LL) << "0" << " } }";
+                return ss.str();
+            }
+        };
+
+        // { $regex, $options } parses to a BSON regex element.
+        class Regex : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.appendRegex( "a", "b", "i" );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"i\" } }";
+            }
+        };
+
+        // An escape sequence ( \t ) inside a $regex pattern is unescaped.
+        class RegexEscape : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.appendRegex( "a", "\t", "i" );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"a\" : { \"$regex\" : \"\\t\", \"$options\" : \"i\" } }";
+            }
+        };
+
+        // A /pattern/ literal whose pattern is a double-quote character.
+        class RegexWithQuotes : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.appendRegex( "a", "\"", "" );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"a\" : /\"/ }";
+            }
+        };
+
+        // An unrecognized character in $options ( "1" ) must be rejected.
+        class RegexInvalidOption : public Bad {
+            virtual string json() const {
+                return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"1\" } }";
+            }
+        };
+
+        // An unrecognized flag on a /pattern/ literal ( 'c' ) must be rejected.
+        class RegexInvalidOption2 : public Bad {
+            virtual string json() const {
+                return "{ \"a\" : /b/c }";
+            }
+        };
+
+        // An unterminated object must be rejected.
+        class Malformed : public Bad {
+            string json() const {
+                return "{";
+            }
+        };
+
+        // Field names may appear without quotes.
+        class UnquotedFieldName : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.append( "a_b", 1 );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ a_b : 1 }";
+            }
+        };
+
+        // Unquoted field names may begin with '$'.
+        class UnquotedFieldNameDollar : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.append( "$a_b", 1 );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ $a_b : 1 }";
+            }
+        };
+
+        // Single-quoted strings are accepted, with escaped single quotes and
+        // unescaped double quotes inside them.
+        class SingleQuotes : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.append( "ab'c\"", "bb\b '\"" );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ 'ab\\'c\"' : 'bb\\b \\'\"' }";
+            }
+        };
+
+        // ObjectId( "..." ) for the _id field parses to the matching OID.
+        class ObjectId : public Base {
+            virtual BSONObj bson() const {
+                OID id;
+                id.init( "deadbeeff00ddeadbeeff00d" );
+                BSONObjBuilder b;
+                b.appendOID( "_id", &id );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"_id\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
+            }
+        };
+
+        // ObjectId( "..." ) also works for fields other than _id.
+        class ObjectId2 : public Base {
+            virtual BSONObj bson() const {
+                OID id;
+                id.init( "deadbeeff00ddeadbeeff00d" );
+                BSONObjBuilder b;
+                b.appendOID( "foo", &id );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"foo\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
+            }
+        };
+
+        // Numeric literals keep their natural BSON types: small integers parse
+        // as NumberInt, values beyond 32 bits as NumberLong, and decimals as
+        // NumberDouble.
+        class NumericTypes : public Base {
+        public:
+            void run() {
+                Base::run();
+
+                BSONObj o = fromjson(json());
+
+                ASSERT(o["int"].type() == NumberInt);
+                ASSERT(o["long"].type() == NumberLong);
+                ASSERT(o["double"].type() == NumberDouble);
+
+                ASSERT(o["long"].numberLong() == 9223372036854775807ll);
+            }
+
+            virtual BSONObj bson() const {
+                return BSON( "int" << 123
+                             << "long" << 9223372036854775807ll // 2**63 - 1
+                             << "double" << 3.14
+                           );
+            }
+            virtual string json() const {
+                return "{ \"int\": 123, \"long\": 9223372036854775807, \"double\": 3.14 }";
+            }
+        };
+
+        // Same type-preservation checks as NumericTypes, for negative values.
+        class NegativeNumericTypes : public Base {
+        public:
+            void run() {
+                Base::run();
+
+                BSONObj o = fromjson(json());
+
+                ASSERT(o["int"].type() == NumberInt);
+                ASSERT(o["long"].type() == NumberLong);
+                ASSERT(o["double"].type() == NumberDouble);
+
+                ASSERT(o["long"].numberLong() == -9223372036854775807ll);
+            }
+
+            virtual BSONObj bson() const {
+                return BSON( "int" << -123
+                             << "long" << -9223372036854775807ll // -1 * (2**63 - 1)
+                             << "double" << -3.14
+                           );
+            }
+            virtual string json() const {
+                return "{ \"int\": -123, \"long\": -9223372036854775807, \"double\": -3.14 }";
+            }
+        };
+
+        // Base for tests parsing dates nested inside a subobject; subclasses
+        // supply the same value in different date syntaxes.
+        class EmbeddedDatesBase : public Base {
+        public:
+
+            virtual void run() {
+                BSONObj o = fromjson( json() );
+                // BSON type codes: 3 == Object, 9 == Date.
+                ASSERT_EQUALS( 3 , (o["time.valid"].type()) );
+                BSONObj e = o["time.valid"].embeddedObjectUserCheck();
+                ASSERT_EQUALS( 9 , e["$gt"].type() );
+                ASSERT_EQUALS( 9 , e["$lt"].type() );
+                Base::run();
+            }
+
+            BSONObj bson() const {
+                BSONObjBuilder e;
+                e.appendDate( "$gt" , 1257829200000LL );
+                e.appendDate( "$lt" , 1257829200100LL );
+
+                BSONObjBuilder b;
+                b.append( "time.valid" , e.obj() );
+                return b.obj();
+            }
+            // Subclasses provide the concrete date syntax under test.
+            virtual string json() const = 0;
+        };
+
+        // Embedded dates in the { $date : n } extended JSON form.
+        struct EmbeddedDatesFormat1 : EmbeddedDatesBase {
+            string json() const {
+                return "{ \"time.valid\" : { $gt : { \"$date\" : 1257829200000 } , $lt : { \"$date\" : 1257829200100 } } }";
+            }
+        };
+        // Embedded dates in the Date( n ) function-call form.
+        struct EmbeddedDatesFormat2 : EmbeddedDatesBase {
+            string json() const {
+                return "{ \"time.valid\" : { $gt : Date(1257829200000) , $lt : Date( 1257829200100 ) } }";
+            }
+        };
+        // Embedded dates in the new Date( n ) constructor form.
+        struct EmbeddedDatesFormat3 : EmbeddedDatesBase {
+            string json() const {
+                return "{ \"time.valid\" : { $gt : new Date(1257829200000) , $lt : new Date( 1257829200100 ) } }";
+            }
+        };
+
+        // \u0000 escapes parse into a string containing an embedded NUL byte
+        // (the 4-byte append includes "a", NUL, "b", and the terminator).
+        class NullString : public Base {
+            virtual BSONObj bson() const {
+                BSONObjBuilder b;
+                b.append( "x" , "a\0b" , 4 );
+                return b.obj();
+            }
+            virtual string json() const {
+                return "{ \"x\" : \"a\\u0000b\" }";
+            }
+        };
+
+ } // namespace FromJsonTests
+
+    // Registers every JsonStringTests (BSON -> JSON) and FromJsonTests
+    // (JSON -> BSON) case with the "json" suite.
+    class All : public Suite {
+    public:
+        All() : Suite( "json" ) {
+        }
+
+        void setupTests() {
+            add< JsonStringTests::Empty >();
+            add< JsonStringTests::SingleStringMember >();
+            add< JsonStringTests::EscapedCharacters >();
+            add< JsonStringTests::AdditionalControlCharacters >();
+            add< JsonStringTests::ExtendedAscii >();
+            add< JsonStringTests::EscapeFieldName >();
+            add< JsonStringTests::SingleIntMember >();
+            add< JsonStringTests::SingleNumberMember >();
+            add< JsonStringTests::InvalidNumbers >();
+            add< JsonStringTests::NumberPrecision >();
+            add< JsonStringTests::NegativeNumber >();
+            add< JsonStringTests::SingleBoolMember >();
+            add< JsonStringTests::SingleNullMember >();
+            add< JsonStringTests::SingleObjectMember >();
+            add< JsonStringTests::TwoMembers >();
+            add< JsonStringTests::EmptyArray >();
+            add< JsonStringTests::Array >();
+            add< JsonStringTests::DBRef >();
+            add< JsonStringTests::DBRefZero >();
+            add< JsonStringTests::ObjectId >();
+            add< JsonStringTests::BinData >();
+            add< JsonStringTests::Symbol >();
+            add< JsonStringTests::Date >();
+            add< JsonStringTests::Regex >();
+            add< JsonStringTests::RegexEscape >();
+            add< JsonStringTests::RegexManyOptions >();
+            add< JsonStringTests::CodeTests >();
+            add< JsonStringTests::TimestampTests >();
+            add< JsonStringTests::NullString >();
+            add< JsonStringTests::AllTypes >();
+
+            add< FromJsonTests::Empty >();
+            add< FromJsonTests::EmptyWithSpace >();
+            add< FromJsonTests::SingleString >();
+            add< FromJsonTests::EmptyStrings >();
+            add< FromJsonTests::ReservedFieldName >();
+            add< FromJsonTests::OkDollarFieldName >();
+            add< FromJsonTests::SingleNumber >();
+            add< FromJsonTests::RealNumber >();
+            add< FromJsonTests::FancyNumber >();
+            add< FromJsonTests::TwoElements >();
+            add< FromJsonTests::Subobject >();
+            add< FromJsonTests::ArrayEmpty >();
+            add< FromJsonTests::Array >();
+            add< FromJsonTests::True >();
+            add< FromJsonTests::False >();
+            add< FromJsonTests::Null >();
+            add< FromJsonTests::EscapedCharacters >();
+            add< FromJsonTests::NonEscapedCharacters >();
+            add< FromJsonTests::AllowedControlCharacter >();
+            add< FromJsonTests::EscapeFieldName >();
+            add< FromJsonTests::EscapedUnicodeToUtf8 >();
+            add< FromJsonTests::Utf8AllOnes >();
+            add< FromJsonTests::Utf8FirstByteOnes >();
+            add< FromJsonTests::DBRef >();
+            add< FromJsonTests::NewDBRef >();
+            add< FromJsonTests::Oid >();
+            add< FromJsonTests::Oid2 >();
+            add< FromJsonTests::StringId >();
+            add< FromJsonTests::BinData >();
+            add< FromJsonTests::BinDataPaddedSingle >();
+            add< FromJsonTests::BinDataPaddedDouble >();
+            add< FromJsonTests::BinDataAllChars >();
+            add< FromJsonTests::Date >();
+            add< FromJsonTests::DateNonzero >();
+            add< FromJsonTests::DateTooLong >();
+            add< FromJsonTests::Regex >();
+            add< FromJsonTests::RegexEscape >();
+            add< FromJsonTests::RegexWithQuotes >();
+            add< FromJsonTests::RegexInvalidOption >();
+            add< FromJsonTests::RegexInvalidOption2 >();
+            add< FromJsonTests::Malformed >();
+            add< FromJsonTests::UnquotedFieldName >();
+            add< FromJsonTests::UnquotedFieldNameDollar >();
+            add< FromJsonTests::SingleQuotes >();
+            add< FromJsonTests::ObjectId >();
+            add< FromJsonTests::ObjectId2 >();
+            add< FromJsonTests::NumericTypes >();
+            add< FromJsonTests::NegativeNumericTypes >();
+            add< FromJsonTests::EmbeddedDatesFormat1 >();
+            add< FromJsonTests::EmbeddedDatesFormat2 >();
+            add< FromJsonTests::EmbeddedDatesFormat3 >();
+            add< FromJsonTests::NullString >();
+        }
+    } myall;
+
+} // namespace JsonTests
+
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
new file mode 100644
index 00000000000..9782eedaacb
--- /dev/null
+++ b/src/mongo/dbtests/jstests.cpp
@@ -0,0 +1,1052 @@
+// jstests.cpp
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/instance.h"
+
+#include "../pch.h"
+#include "../scripting/engine.h"
+#include "../util/timer.h"
+
+#include "dbtests.h"
+
+namespace mongo {
+ bool dbEval(const string& dbName , BSONObj& cmd, BSONObjBuilder& result, string& errmsg);
+} // namespace mongo
+
+namespace JSTests {
+
+    // Bootstraps the global script engine and runs its built-in self test.
+    class Fundamental {
+    public:
+        void run() {
+            // By calling JavaJSImpl() inside run(), we ensure the unit test framework's
+            // signal handlers are pre-installed from JNI's perspective. This allows
+            // JNI to catch signals generated within the JVM and forward other signals
+            // as appropriate.
+            ScriptEngine::setup();
+            globalScriptEngine->runTest();
+        }
+    };
+
+    // Round trips numbers, strings, and booleans through scope setters/getters.
+    class BasicScope {
+    public:
+        void run() {
+            auto_ptr<Scope> s;
+            s.reset( globalScriptEngine->newScope() );
+
+            s->setNumber( "x" , 5 );
+            ASSERT( 5 == s->getNumber( "x" ) );
+
+            s->setNumber( "x" , 1.67 );
+            ASSERT( 1.67 == s->getNumber( "x" ) );
+
+            s->setString( "s" , "eliot was here" );
+            ASSERT( "eliot was here" == s->getString( "s" ) );
+
+            s->setBoolean( "b" , true );
+            ASSERT( s->getBoolean( "b" ) );
+
+            // Deliberately disabled check -- left for reference.
+            if ( 0 ) {
+                s->setBoolean( "b" , false );
+                ASSERT( ! s->getBoolean( "b" ) );
+            }
+        }
+    };
+
+    // Placeholder for scope reset semantics; the check is intentionally
+    // disabled pending SERVER-446.
+    class ResetScope {
+    public:
+        void run() {
+            // Not worrying about this for now SERVER-446.
+            /*
+            auto_ptr<Scope> s;
+            s.reset( globalScriptEngine->newScope() );
+
+            s->setBoolean( "x" , true );
+            ASSERT( s->getBoolean( "x" ) );
+
+            s->reset();
+            ASSERT( !s->getBoolean( "x" ) );
+            */
+        }
+    };
+
+    // Verifies that unset variables and empty strings read as boolean false.
+    class FalseTests {
+    public:
+        void run() {
+            // Own the scope via auto_ptr so an ASSERT failure (which throws)
+            // cannot leak it; the raw new/delete version leaked on failure.
+            // Matches the style of BasicScope above.
+            auto_ptr<Scope> s( globalScriptEngine->newScope() );
+
+            // A variable that was never set reads as false.
+            ASSERT( ! s->getBoolean( "x" ) );
+
+            // An empty string is also falsy.
+            s->setString( "z" , "" );
+            ASSERT( ! s->getBoolean( "z" ) );
+        }
+    };
+
+    // Exercises basic invoke() behavior: plain statements, explicit returns,
+    // anonymous function bodies, and argument passing.
+    class SimpleFunctions {
+    public:
+        void run() {
+            // Own the scope via auto_ptr so an ASSERT failure (which throws)
+            // cannot leak it; the raw new/delete version leaked on failure.
+            auto_ptr<Scope> s( globalScriptEngine->newScope() );
+
+            s->invoke( "x=5;" , 0, 0 );
+            ASSERT( 5 == s->getNumber( "x" ) );
+
+            // The value of a "return" statement is exposed as the "return" field.
+            s->invoke( "return 17;" , 0, 0 );
+            ASSERT( 17 == s->getNumber( "return" ) );
+
+            s->invoke( "function(){ return 17; }" , 0, 0 );
+            ASSERT( 17 == s->getNumber( "return" ) );
+
+            s->setNumber( "x" , 1.76 );
+            s->invoke( "return x == 1.76; " , 0, 0 );
+            ASSERT( s->getBoolean( "return" ) );
+
+            s->setNumber( "x" , 1.76 );
+            s->invoke( "return x == 1.79; " , 0, 0 );
+            ASSERT( ! s->getBoolean( "return" ) );
+
+            // Arguments are supplied to the invoked function via a BSON object.
+            BSONObj obj = BSON( "" << 11.0 );
+            s->invoke( "function( z ){ return 5 + z; }" , &obj, 0 );
+            ASSERT_EQUALS( 16 , s->getNumber( "return" ) );
+        }
+    };
+
+    // Checks field access on BSON objects exposed to JS, both as a named
+    // variable ( setObject ) and as the 'this' object of an invocation.
+    class ObjectMapping {
+    public:
+        void run() {
+            Scope * s = globalScriptEngine->newScope();
+
+            BSONObj o = BSON( "x" << 17.0 << "y" << "eliot" << "z" << "sara" );
+            s->setObject( "blah" , o );
+
+            s->invoke( "return blah.x;" , 0, 0 );
+            ASSERT_EQUALS( 17 , s->getNumber( "return" ) );
+            s->invoke( "return blah.y;" , 0, 0 );
+            ASSERT_EQUALS( "eliot" , s->getString( "return" ) );
+
+            s->invoke( "return this.z;" , 0, &o );
+            ASSERT_EQUALS( "sara" , s->getString( "return" ) );
+
+            s->invoke( "return this.z == 'sara';" , 0, &o );
+            ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+            // Even without an explicit "return", the last expression's value is
+            // exposed as "return".
+            s->invoke( "this.z == 'sara';" , 0, &o );
+            ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+            s->invoke( "this.z == 'asara';" , 0, &o );
+            ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+            s->invoke( "return this.x == 17;" , 0, &o );
+            ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+            s->invoke( "return this.x == 18;" , 0, &o );
+            ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+            s->invoke( "function(){ return this.x == 17; }" , 0, &o );
+            ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+            s->invoke( "function(){ return this.x == 18; }" , 0, &o );
+            ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+            s->invoke( "function (){ return this.x == 17; }" , 0, &o );
+            ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+            s->invoke( "function z(){ return this.x == 18; }" , 0, &o );
+            ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+            // A function body with no return statement yields false.
+            s->invoke( "function (){ this.x == 17; }" , 0, &o );
+            ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+            s->invoke( "function z(){ this.x == 18; }" , 0, &o );
+            ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+            s->invoke( "x = 5; for( ; x <10; x++){ a = 1; }" , 0, &o );
+            ASSERT_EQUALS( 10 , s->getNumber( "x" ) );
+
+            delete s;
+        }
+    };
+
+    // Converts JS object literals back into BSON via getObject().
+    class ObjectDecoding {
+    public:
+        void run() {
+            Scope * s = globalScriptEngine->newScope();
+
+            s->invoke( "z = { num : 1 };" , 0, 0 );
+            BSONObj out = s->getObject( "z" );
+            ASSERT_EQUALS( 1 , out["num"].number() );
+            ASSERT_EQUALS( 1 , out.nFields() );
+
+            s->invoke( "z = { x : 'eliot' };" , 0, 0 );
+            out = s->getObject( "z" );
+            ASSERT_EQUALS( (string)"eliot" , out["x"].valuestr() );
+            ASSERT_EQUALS( 1 , out.nFields() );
+
+            // A BSON object set into the scope reads back unchanged.
+            BSONObj o = BSON( "x" << 17 );
+            s->setObject( "blah" , o );
+            out = s->getObject( "blah" );
+            ASSERT_EQUALS( 17 , out["x"].number() );
+
+            delete s;
+        }
+    };
+
+    // ObjectId values created in JS survive round trips through BSON and the
+    // ObjectId copy constructor. Only compiled for the SpiderMonkey engine.
+    class JSOIDTests {
+    public:
+        void run() {
+#ifdef MOZJS
+            Scope * s = globalScriptEngine->newScope();
+
+            s->localConnect( "blah" );
+
+            s->invoke( "z = { _id : new ObjectId() , a : 123 };" , 0, 0 );
+            BSONObj out = s->getObject( "z" );
+            ASSERT_EQUALS( 123 , out["a"].number() );
+            ASSERT_EQUALS( jstOID , out["_id"].type() );
+
+            OID save = out["_id"].__oid();
+
+            s->setObject( "a" , out );
+
+            // Re-using the OID field keeps the same id.
+            s->invoke( "y = { _id : a._id , a : 124 };" , 0, 0 );
+            out = s->getObject( "y" );
+            ASSERT_EQUALS( 124 , out["a"].number() );
+            ASSERT_EQUALS( jstOID , out["_id"].type() );
+            ASSERT_EQUALS( out["_id"].__oid().str() , save.str() );
+
+            // Constructing a new ObjectId from an existing one copies the id.
+            s->invoke( "y = { _id : new ObjectId( a._id ) , a : 125 };" , 0, 0 );
+            out = s->getObject( "y" );
+            ASSERT_EQUALS( 125 , out["a"].number() );
+            ASSERT_EQUALS( jstOID , out["_id"].type() );
+            ASSERT_EQUALS( out["_id"].__oid().str() , save.str() );
+
+            delete s;
+#endif
+        }
+    };
+
+    // setObject() with a dotted name ( "a.b" ) only takes effect when the
+    // parent object already exists with the nested field present.
+    class SetImplicit {
+    public:
+        void run() {
+            // Own the scope via auto_ptr: the original version never deleted
+            // it, leaking a Scope on every run.
+            auto_ptr<Scope> s( globalScriptEngine->newScope() );
+
+            // No parent object "a" exists, so "a.b" is not created.
+            BSONObj o = BSON( "foo" << "bar" );
+            s->setObject( "a.b", o );
+            ASSERT( s->getObject( "a" ).isEmpty() );
+
+            // An empty parent without field "b" also does not accept "a.b".
+            BSONObj o2 = BSONObj();
+            s->setObject( "a", o2 );
+            s->setObject( "a.b", o );
+            ASSERT( s->getObject( "a" ).isEmpty() );
+
+            // Once the parent has a "b" field, the nested set succeeds.
+            o2 = fromjson( "{b:{}}" );
+            s->setObject( "a", o2 );
+            s->setObject( "a.b", o );
+            ASSERT( !s->getObject( "a" ).isEmpty() );
+        }
+    };
+
+    // An object installed with setObject( ..., true ) is read-only from JS:
+    // field writes, additions, nested writes, and deletes are all ignored.
+    class ObjectModReadonlyTests {
+    public:
+        void run() {
+            Scope * s = globalScriptEngine->newScope();
+
+            BSONObj o = BSON( "x" << 17 << "y" << "eliot" << "z" << "sara" << "zz" << BSONObj() );
+            s->setObject( "blah" , o , true );
+
+            // Overwriting a field has no effect.
+            s->invoke( "blah.y = 'e'", 0, 0 );
+            BSONObj out = s->getObject( "blah" );
+            ASSERT( strlen( out["y"].valuestr() ) > 1 );
+
+            // Adding a field has no effect.
+            s->invoke( "blah.a = 19;" , 0, 0 );
+            out = s->getObject( "blah" );
+            ASSERT( out["a"].eoo() );
+
+            // Writing into a nested object has no effect.
+            s->invoke( "blah.zz.a = 19;" , 0, 0 );
+            out = s->getObject( "blah" );
+            ASSERT( out["zz"].embeddedObject()["a"].eoo() );
+
+            // setObject() on a nested path of a read-only object has no effect.
+            s->setObject( "blah.zz", BSON( "a" << 19 ) );
+            out = s->getObject( "blah" );
+            ASSERT( out["zz"].embeddedObject()["a"].eoo() );
+
+            // Deleting a field has no effect.
+            s->invoke( "delete blah['x']" , 0, 0 );
+            out = s->getObject( "blah" );
+            ASSERT( !out["x"].eoo() );
+
+            // read-only object itself can be overwritten
+            s->invoke( "blah = {}", 0, 0 );
+            out = s->getObject( "blah" );
+            ASSERT( out.isEmpty() );
+
+            // test array - can't implement this in v8
+//            o = fromjson( "{a:[1,2,3]}" );
+//            s->setObject( "blah", o, true );
+//            out = s->getObject( "blah" );
+//            s->invoke( "blah.a[ 0 ] = 4;", BSONObj() );
+//            s->invoke( "delete blah['a'][ 2 ];", BSONObj() );
+//            out = s->getObject( "blah" );
+//            ASSERT_EQUALS( 1.0, out[ "a" ].embeddedObject()[ 0 ].number() );
+//            ASSERT_EQUALS( 3.0, out[ "a" ].embeddedObject()[ 2 ].number() );
+
+            delete s;
+        }
+    };
+
+    // Round trips non-scalar BSON types -- Date, regex, and array -- through
+    // the JS engine and back.
+    class OtherJSTypes {
+    public:
+        void run() {
+            Scope * s = globalScriptEngine->newScope();
+
+            {
+                // date
+                BSONObj o;
+                {
+                    BSONObjBuilder b;
+                    b.appendDate( "d" , 123456789 );
+                    o = b.obj();
+                }
+                s->setObject( "x" , o );
+
+                // The BSON date maps to a JS Date with a working getTime().
+                s->invoke( "return x.d.getTime() != 12;" , 0, 0 );
+                ASSERT_EQUALS( true, s->getBoolean( "return" ) );
+
+                s->invoke( "z = x.d.getTime();" , 0, 0 );
+                ASSERT_EQUALS( 123456789 , s->getNumber( "z" ) );
+
+                // And converts back to a BSON Date when re-embedded.
+                s->invoke( "z = { z : x.d }" , 0, 0 );
+                BSONObj out = s->getObject( "z" );
+                ASSERT( out["z"].type() == Date );
+            }
+
+            {
+                // regex
+                BSONObj o;
+                {
+                    BSONObjBuilder b;
+                    b.appendRegex( "r" , "^a" , "i" );
+                    o = b.obj();
+                }
+                s->setObject( "x" , o );
+
+                // The BSON regex maps to a JS RegExp with a working test().
+                s->invoke( "z = x.r.test( 'b' );" , 0, 0 );
+                ASSERT_EQUALS( false , s->getBoolean( "z" ) );
+
+                s->invoke( "z = x.r.test( 'a' );" , 0, 0 );
+                ASSERT_EQUALS( true , s->getBoolean( "z" ) );
+
+                s->invoke( "z = x.r.test( 'ba' );" , 0, 0 );
+                ASSERT_EQUALS( false , s->getBoolean( "z" ) );
+
+                // And converts back, preserving pattern and flags.
+                s->invoke( "z = { a : x.r };" , 0, 0 );
+
+                BSONObj out = s->getObject("z");
+                ASSERT_EQUALS( (string)"^a" , out["a"].regex() );
+                ASSERT_EQUALS( (string)"i" , out["a"].regexFlags() );
+
+            }
+
+            // array
+            {
+                // Arrays keep the Array type whether set read-write or read-only.
+                BSONObj o = fromjson( "{r:[1,2,3]}" );
+                s->setObject( "x", o, false );
+                BSONObj out = s->getObject( "x" );
+                ASSERT_EQUALS( Array, out.firstElement().type() );
+
+                s->setObject( "x", o, true );
+                out = s->getObject( "x" );
+                ASSERT_EQUALS( Array, out.firstElement().type() );
+            }
+
+            delete s;
+        }
+    };
+
+    // Timestamp, MinKey, and MaxKey values survive a pass through JS with
+    // their types and Timestamp time/increment components intact.
+    class SpecialDBTypes {
+    public:
+        void run() {
+            Scope * s = globalScriptEngine->newScope();
+
+            BSONObjBuilder b;
+            b.appendTimestamp( "a" , 123456789 );
+            b.appendMinKey( "b" );
+            b.appendMaxKey( "c" );
+            b.appendTimestamp( "d" , 1234000 , 9876 );
+
+
+            {
+                // Sanity-check the builder output before involving JS.
+                BSONObj t = b.done();
+                ASSERT_EQUALS( 1234000U , t["d"].timestampTime() );
+                ASSERT_EQUALS( 9876U , t["d"].timestampInc() );
+            }
+
+            s->setObject( "z" , b.obj() );
+
+            ASSERT( s->invoke( "y = { a : z.a , b : z.b , c : z.c , d: z.d }" , 0, 0 ) == 0 );
+
+            BSONObj out = s->getObject( "y" );
+            ASSERT_EQUALS( Timestamp , out["a"].type() );
+            ASSERT_EQUALS( MinKey , out["b"].type() );
+            ASSERT_EQUALS( MaxKey , out["c"].type() );
+            ASSERT_EQUALS( Timestamp , out["d"].type() );
+
+            ASSERT_EQUALS( 9876U , out["d"].timestampInc() );
+            ASSERT_EQUALS( 1234000U , out["d"].timestampTime() );
+            ASSERT_EQUALS( 123456789U , out["a"].date() );
+
+            delete s;
+        }
+    };
+
+    // NumberInt vs NumberDouble distinctions must survive a round trip through
+    // the JS engine, including inside arrays.
+    class TypeConservation {
+    public:
+        void run() {
+            Scope * s = globalScriptEngine->newScope();
+
+            // -- A -- read-write object round trip keeps int/double types.
+
+            BSONObj o;
+            {
+                BSONObjBuilder b ;
+                b.append( "a" , (int)5 );
+                b.append( "b" , 5.6 );
+                o = b.obj();
+            }
+            ASSERT_EQUALS( NumberInt , o["a"].type() );
+            ASSERT_EQUALS( NumberDouble , o["b"].type() );
+
+            s->setObject( "z" , o );
+            s->invoke( "return z" , 0, 0 );
+            BSONObj out = s->getObject( "return" );
+            ASSERT_EQUALS( 5 , out["a"].number() );
+            ASSERT_EQUALS( 5.6 , out["b"].number() );
+
+            ASSERT_EQUALS( NumberDouble , out["b"].type() );
+            ASSERT_EQUALS( NumberInt , out["a"].type() );
+
+            // -- B -- same round trip with readOnly explicitly false.
+
+            {
+                BSONObjBuilder b ;
+                b.append( "a" , (int)5 );
+                b.append( "b" , 5.6 );
+                o = b.obj();
+            }
+
+            s->setObject( "z" , o , false );
+            s->invoke( "return z" , 0, 0 );
+            out = s->getObject( "return" );
+            ASSERT_EQUALS( 5 , out["a"].number() );
+            ASSERT_EQUALS( 5.6 , out["b"].number() );
+
+            ASSERT_EQUALS( NumberDouble , out["b"].type() );
+            ASSERT_EQUALS( NumberInt , out["a"].type() );
+
+
+            // -- C -- numeric types inside an array element.
+
+            {
+                BSONObjBuilder b ;
+
+                {
+                    BSONObjBuilder c;
+                    c.append( "0" , 5.5 );
+                    c.append( "1" , 6 );
+                    b.appendArray( "a" , c.obj() );
+                }
+
+                o = b.obj();
+            }
+
+            ASSERT_EQUALS( NumberDouble , o["a"].embeddedObjectUserCheck()["0"].type() );
+            ASSERT_EQUALS( NumberInt , o["a"].embeddedObjectUserCheck()["1"].type() );
+
+            s->setObject( "z" , o , false );
+            out = s->getObject( "z" );
+
+            ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
+            ASSERT_EQUALS( NumberInt , out["a"].embeddedObjectUserCheck()["1"].type() );
+
+            // Mutating the object from JS keeps existing element types.
+            s->invokeSafe( "z.z = 5;" , 0, 0 );
+            out = s->getObject( "z" );
+            ASSERT_EQUALS( 5 , out["z"].number() );
+            ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
+            // Commenting so that v8 tests will work
+//            ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type() ); // TODO: this is technically bad, but here to make sure that i understand the behavior
+
+
+            // Eliot says I don't have to worry about this case
+
+//            // -- D --
+//
+//            o = fromjson( "{a:3.0,b:4.5}" );
+//            ASSERT_EQUALS( NumberDouble , o["a"].type() );
+//            ASSERT_EQUALS( NumberDouble , o["b"].type() );
+//
+//            s->setObject( "z" , o , false );
+//            s->invoke( "return z" , BSONObj() );
+//            out = s->getObject( "return" );
+//            ASSERT_EQUALS( 3 , out["a"].number() );
+//            ASSERT_EQUALS( 4.5 , out["b"].number() );
+//
+//            ASSERT_EQUALS( NumberDouble , out["b"].type() );
+//            ASSERT_EQUALS( NumberDouble , out["a"].type() );
+//
+
+            delete s;
+        }
+
+    };
+
+    // A 64-bit value that cannot be represented exactly as a double must keep
+    // the NumberLong type through the JS engine, and its toString/toNumber/
+    // floatApprox/top accessors must behave as documented. The class name
+    // shadows the mongo::NumberLong enum, hence the explicit qualification.
+    class NumberLong {
+    public:
+        void run() {
+            auto_ptr<Scope> s( globalScriptEngine->newScope() );
+            s->localConnect( "blah" );
+            BSONObjBuilder b;
+            long long val = (long long)( 0xbabadeadbeefbaddULL );
+            b.append( "a", val );
+            BSONObj in = b.obj();
+            s->setObject( "a", in );
+            BSONObj out = s->getObject( "a" );
+            ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+
+            ASSERT( s->exec( "b = {b:a.a}", "foo", false, true, false ) );
+            out = s->getObject( "b" );
+            ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+            // Log the values before asserting so a mismatch is diagnosable.
+            if( val != out.firstElement().numberLong() ) {
+                cout << val << endl;
+                cout << out.firstElement().numberLong() << endl;
+                cout << out.toString() << endl;
+                ASSERT_EQUALS( val, out.firstElement().numberLong() );
+            }
+
+            ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
+            out = s->getObject( "c" );
+            stringstream ss;
+            ss << "NumberLong(\"" << val << "\")";
+            ASSERT_EQUALS( ss.str(), out.firstElement().valuestr() );
+
+            ASSERT( s->exec( "d = {d:a.a.toNumber()}", "foo", false, true, false ) );
+            out = s->getObject( "d" );
+            ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+            ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+            ASSERT( s->exec( "e = {e:a.a.floatApprox}", "foo", false, true, false ) );
+            out = s->getObject( "e" );
+            ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+            ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+            // "top" (the high 32 bits) is present because the value does not
+            // fit exactly in a double.
+            ASSERT( s->exec( "f = {f:a.a.top}", "foo", false, true, false ) );
+            out = s->getObject( "f" );
+            ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
+
+            // For a small value, "top" is undefined and floatApprox carries
+            // the full value.
+            s->setObject( "z", BSON( "z" << (long long)( 4 ) ) );
+            ASSERT( s->exec( "y = {y:z.z.top}", "foo", false, true, false ) );
+            out = s->getObject( "y" );
+            ASSERT_EQUALS( Undefined, out.firstElement().type() );
+
+            ASSERT( s->exec( "x = {x:z.z.floatApprox}", "foo", false, true, false ) );
+            out = s->getObject( "x" );
+            ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
+            ASSERT_EQUALS( double( 4 ), out.firstElement().number() );
+
+            ASSERT( s->exec( "w = {w:z.z}", "foo", false, true, false ) );
+            out = s->getObject( "w" );
+            ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+            ASSERT_EQUALS( 4, out.firstElement().numberLong() );
+
+        }
+    };
+
+    // Long values of increasing magnitude survive a tojson / re-eval round
+    // trip unchanged.
+    class NumberLong2 {
+    public:
+        void run() {
+            auto_ptr<Scope> s( globalScriptEngine->newScope() );
+            s->localConnect( "blah" );
+
+            BSONObj in;
+            {
+                BSONObjBuilder b;
+                b.append( "a" , 5 );
+                b.append( "b" , (long long)5 );
+                b.append( "c" , (long long)pow( 2.0, 29 ) );
+                b.append( "d" , (long long)pow( 2.0, 30 ) );
+                b.append( "e" , (long long)pow( 2.0, 31 ) );
+                b.append( "f" , (long long)pow( 2.0, 45 ) );
+                in = b.obj();
+            }
+            s->setObject( "a" , in );
+
+            // Serialize with tojson(), then eval the result back into an object.
+            ASSERT( s->exec( "x = tojson( a ); " ,"foo" , false , true , false ) );
+            string outString = s->getString( "x" );
+
+            ASSERT( s->exec( (string)"y = " + outString , "foo2" , false , true , false ) );
+            BSONObj out = s->getObject( "y" );
+            ASSERT_EQUALS( in , out );
+        }
+    };
+
+    // A long value at the edge of exact double representation (2^53 - 1)
+    // keeps the NumberLong type, and "top" is undefined since the value fits
+    // in a double.
+    class NumberLongUnderLimit {
+    public:
+        void run() {
+            auto_ptr<Scope> s( globalScriptEngine->newScope() );
+            s->localConnect( "blah" );
+            BSONObjBuilder b;
+            // limit is 2^53
+            long long val = (long long)( 9007199254740991ULL );
+            b.append( "a", val );
+            BSONObj in = b.obj();
+            s->setObject( "a", in );
+            BSONObj out = s->getObject( "a" );
+            ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+
+            ASSERT( s->exec( "b = {b:a.a}", "foo", false, true, false ) );
+            out = s->getObject( "b" );
+            ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+            // Log the values before asserting so a mismatch is diagnosable.
+            if( val != out.firstElement().numberLong() ) {
+                cout << val << endl;
+                cout << out.firstElement().numberLong() << endl;
+                cout << out.toString() << endl;
+                ASSERT_EQUALS( val, out.firstElement().numberLong() );
+            }
+
+            ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
+            out = s->getObject( "c" );
+            stringstream ss;
+            ss << "NumberLong(\"" << val << "\")";
+            ASSERT_EQUALS( ss.str(), out.firstElement().valuestr() );
+
+            ASSERT( s->exec( "d = {d:a.a.toNumber()}", "foo", false, true, false ) );
+            out = s->getObject( "d" );
+            ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+            ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+            ASSERT( s->exec( "e = {e:a.a.floatApprox}", "foo", false, true, false ) );
+            out = s->getObject( "e" );
+            ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+            ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+            // No high-bits field when the value fits exactly in a double.
+            ASSERT( s->exec( "f = {f:a.a.top}", "foo", false, true, false ) );
+            out = s->getObject( "f" );
+            ASSERT( Undefined == out.firstElement().type() );
+        }
+    };
+
+    // tojson() must not blow up on deeply nested array structures, whether
+    // the object is set read-write or read-only.
+    class WeirdObjects {
+    public:
+
+        // Builds a nested object of the given depth, each level holding its
+        // depth at "0" and the next level (as an array) at "1".
+        BSONObj build( int depth ) {
+            BSONObjBuilder b;
+            b.append( "0" , depth );
+            if ( depth > 0 )
+                b.appendArray( "1" , build( depth - 1 ) );
+            return b.obj();
+        }
+
+        void run() {
+            Scope * s = globalScriptEngine->newScope();
+
+            s->localConnect( "blah" );
+
+            for ( int i=5; i<100 ; i += 10 ) {
+                s->setObject( "a" , build(i) , false );
+                s->invokeSafe( "tojson( a )" , 0, 0 );
+
+                s->setObject( "a" , build(5) , true );
+                s->invokeSafe( "tojson( a )" , 0, 0 );
+            }
+
+            delete s;
+        }
+    };
+
+
+    // Never called at runtime ( assert(0) guards that ); exists solely so the
+    // linker pulls in the translation unit defining dbEval.
+    void dummy_function_to_force_dbeval_cpp_linking() {
+        BSONObj cmd;
+        BSONObjBuilder result;
+        string errmsg;
+        dbEval( "test", cmd, result, errmsg);
+        assert(0);
+    }
+
+ DBDirectClient client;
+
+    // UTF-8 _id values of various encoded lengths survive an insert /
+    // eval-side rewrite / findOne round trip. Skipped (with a warning) when
+    // the engine lacks utf8 support.
+    class Utf8Check {
+    public:
+        // Drop the collection before and after so runs are independent.
+        Utf8Check() { reset(); }
+        ~Utf8Check() { reset(); }
+        void run() {
+            if( !globalScriptEngine->utf8Ok() ) {
+                log() << "warning: utf8 not supported" << endl;
+                return;
+            }
+            string utf8ObjSpec = "{'_id':'\\u0001\\u007f\\u07ff\\uffff'}";
+            BSONObj utf8Obj = fromjson( utf8ObjSpec );
+            client.insert( ns(), utf8Obj );
+            // Re-insert the document from within JS, then compare.
+            client.eval( "unittest", "v = db.jstests.utf8check.findOne(); db.jstests.utf8check.remove( {} ); db.jstests.utf8check.insert( v );" );
+            check( utf8Obj, client.findOne( ns(), BSONObj() ) );
+        }
+    private:
+        // FAIL with a message showing both objects when they differ.
+        void check( const BSONObj &one, const BSONObj &two ) {
+            if ( one.woCompare( two ) != 0 ) {
+                static string fail = string( "Assertion failure expected " ) + one.toString() + ", got " + two.toString();
+                FAIL( fail.c_str() );
+            }
+        }
+        void reset() {
+            client.dropCollection( ns() );
+        }
+        static const char *ns() { return "unittest.jstests.utf8check"; }
+    };
+
+    // Saving a document whose _id is a run of max-codepoint UTF-8 characters
+    // must not fail. Skipped when the engine lacks utf8 support.
+    class LongUtf8String {
+    public:
+        // Drop the collection before and after so runs are independent.
+        LongUtf8String() { reset(); }
+        ~LongUtf8String() { reset(); }
+        void run() {
+            if( !globalScriptEngine->utf8Ok() )
+                return;
+            client.eval( "unittest", "db.jstests.longutf8string.save( {_id:'\\uffff\\uffff\\uffff\\uffff'} )" );
+        }
+    private:
+        void reset() {
+            client.dropCollection( ns() );
+        }
+        static const char *ns() { return "unittest.jstests.longutf8string"; }
+    };
+
+    // Feeding an invalid UTF-8 byte sequence ( 0x80 is a bare continuation
+    // byte ) to the JS engine must not raise a C++ exception; a JS-level
+    // error is acceptable.
+    class InvalidUTF8Check {
+    public:
+        void run() {
+            if( !globalScriptEngine->utf8Ok() )
+                return;
+
+            auto_ptr<Scope> s;
+            s.reset( globalScriptEngine->newScope() );
+
+            BSONObj b;
+            {
+                // 0x80 cannot start a UTF-8 character, so this string is
+                // malformed by construction.
+                char crap[5];
+
+                crap[0] = (char) 128;
+                crap[1] = 17;
+                crap[2] = (char) 128;
+                crap[3] = 17;
+                crap[4] = 0;
+
+                BSONObjBuilder bb;
+                bb.append( "x" , crap );
+                b = bb.obj();
+            }
+
+            //cout << "ELIOT: " << b.jsonString() << endl;
+            // its ok if this is handled by js, just can't create a c++ exception
+            s->invoke( "x=this.x.length;" , 0, &b );
+        }
+    };
+
+    // BSON Code and CodeWScope fields become callable JS functions.
+    class CodeTests {
+    public:
+        void run() {
+            Scope * s = globalScriptEngine->newScope();
+
+            {
+                BSONObjBuilder b;
+                b.append( "a" , 1 );
+                b.appendCode( "b" , "function(){ out.b = 11; }" );
+                b.appendCodeWScope( "c" , "function(){ out.c = 12; }" , BSONObj() );
+                b.appendCodeWScope( "d" , "function(){ out.d = 13 + bleh; }" , BSON( "bleh" << 5 ) );
+                s->setObject( "foo" , b.obj() );
+            }
+
+            // Calling foo.b() and foo.c() runs the stored code.
+            s->invokeSafe( "out = {}; out.a = foo.a; foo.b(); foo.c();" , 0, 0 );
+            BSONObj out = s->getObject( "out" );
+
+            ASSERT_EQUALS( 1 , out["a"].number() );
+            ASSERT_EQUALS( 11 , out["b"].number() );
+            ASSERT_EQUALS( 12 , out["c"].number() );
+
+            // Guess we don't care about this
+            //s->invokeSafe( "foo.d() " , BSONObj() );
+            //out = s->getObject( "out" );
+            //ASSERT_EQUALS( 18 , out["d"].number() );
+
+
+            delete s;
+        }
+    };
+
+ class DBRefTest {
+ public:
+ DBRefTest() {
+ _a = "unittest.dbref.a";
+ _b = "unittest.dbref.b";
+ reset();
+ }
+ ~DBRefTest() {
+ //reset();
+ }
+
+ void run() {
+
+ client.insert( _a , BSON( "a" << "17" ) );
+
+ {
+ BSONObj fromA = client.findOne( _a , BSONObj() );
+ assert( fromA.valid() );
+ //cout << "Froma : " << fromA << endl;
+ BSONObjBuilder b;
+ b.append( "b" , 18 );
+ b.appendDBRef( "c" , "dbref.a" , fromA["_id"].__oid() );
+ client.insert( _b , b.obj() );
+ }
+
+ ASSERT( client.eval( "unittest" , "x = db.dbref.b.findOne(); assert.eq( 17 , x.c.fetch().a , 'ref working' );" ) );
+
+ // BSON DBRef <=> JS DBPointer
+ ASSERT( client.eval( "unittest", "x = db.dbref.b.findOne(); db.dbref.b.drop(); x.c = new DBPointer( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
+ ASSERT_EQUALS( DBRef, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );
+
+ // BSON Object <=> JS DBRef
+ ASSERT( client.eval( "unittest", "x = db.dbref.b.findOne(); db.dbref.b.drop(); x.c = new DBRef( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
+ ASSERT_EQUALS( Object, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );
+ ASSERT_EQUALS( string( "dbref.a" ), client.findOne( "unittest.dbref.b", "" )[ "c" ].embeddedObject().getStringField( "$ref" ) );
+ }
+
+ void reset() {
+ client.dropCollection( _a );
+ client.dropCollection( _b );
+ }
+
+ const char * _a;
+ const char * _b;
+ };
+
+ class InformalDBRef {
+ public:
+ void run() {
+ client.insert( ns(), BSON( "i" << 1 ) );
+ BSONObj obj = client.findOne( ns(), BSONObj() );
+ client.remove( ns(), BSONObj() );
+ client.insert( ns(), BSON( "r" << BSON( "$ref" << "jstests.informaldbref" << "$id" << obj["_id"].__oid() << "foo" << "bar" ) ) );
+ obj = client.findOne( ns(), BSONObj() );
+ ASSERT_EQUALS( "bar", obj[ "r" ].embeddedObject()[ "foo" ].str() );
+
+ ASSERT( client.eval( "unittest", "x = db.jstests.informaldbref.findOne(); y = { r:x.r }; db.jstests.informaldbref.drop(); y.r[ \"a\" ] = \"b\"; db.jstests.informaldbref.save( y );" ) );
+ obj = client.findOne( ns(), BSONObj() );
+ ASSERT_EQUALS( "bar", obj[ "r" ].embeddedObject()[ "foo" ].str() );
+ ASSERT_EQUALS( "b", obj[ "r" ].embeddedObject()[ "a" ].str() );
+ }
+ private:
+ static const char *ns() { return "unittest.jstests.informaldbref"; }
+ };
+
+ class BinDataType {
+ public:
+
+ void pp( const char * s , BSONElement e ) {
+ int len;
+ const char * data = e.binData( len );
+ cout << s << ":" << e.binDataType() << "\t" << len << endl;
+ cout << "\t";
+ for ( int i=0; i<len; i++ )
+ cout << (int)(data[i]) << " ";
+ cout << endl;
+ }
+
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+ s->localConnect( "asd" );
+ const char * foo = "asdas\0asdasd";
+ const char * base64 = "YXNkYXMAYXNkYXNk";
+
+ BSONObj in;
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 7 );
+ b.appendBinData( "b" , 12 , BinDataGeneral , foo );
+ in = b.obj();
+ s->setObject( "x" , in );
+ }
+
+ s->invokeSafe( "myb = x.b; print( myb ); printjson( myb );" , 0, 0 );
+ s->invokeSafe( "y = { c : myb };" , 0, 0 );
+
+ BSONObj out = s->getObject( "y" );
+ ASSERT_EQUALS( BinData , out["c"].type() );
+// pp( "in " , in["b"] );
+// pp( "out" , out["c"] );
+ ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
+
+ // check that BinData js class is utilized
+ s->invokeSafe( "q = x.b.toString();", 0, 0 );
+ stringstream expected;
+ expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")";
+ ASSERT_EQUALS( expected.str(), s->getString( "q" ) );
+
+ stringstream scriptBuilder;
+ scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64 << "\" ) };";
+ string script = scriptBuilder.str();
+ s->invokeSafe( script.c_str(), 0, 0 );
+ out = s->getObject( "z" );
+// pp( "out" , out["c"] );
+ ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
+
+ s->invokeSafe( "a = { f: new BinData( 128, \"\" ) };", 0, 0 );
+ out = s->getObject( "a" );
+ int len = -1;
+ out[ "f" ].binData( len );
+ ASSERT_EQUALS( 0, len );
+ ASSERT_EQUALS( 128, out[ "f" ].binDataType() );
+
+ delete s;
+ }
+ };
+
+ class VarTests {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ ASSERT( s->exec( "a = 5;" , "a" , false , true , false ) );
+ ASSERT_EQUALS( 5 , s->getNumber("a" ) );
+
+ ASSERT( s->exec( "var b = 6;" , "b" , false , true , false ) );
+ ASSERT_EQUALS( 6 , s->getNumber("b" ) );
+ delete s;
+ }
+ };
+
+ class Speed1 {
+ public:
+ void run() {
+ BSONObj start = BSON( "x" << 5.0 );
+ BSONObj empty;
+
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ ScriptingFunction f = s->createFunction( "return this.x + 6;" );
+
+ Timer t;
+ double n = 0;
+ for ( ; n < 100000; n++ ) {
+ s->invoke( f , &empty, &start );
+ ASSERT_EQUALS( 11 , s->getNumber( "return" ) );
+ }
+ //cout << "speed1: " << ( n / t.millis() ) << " ops/ms" << endl;
+ }
+ };
+
+ class ScopeOut {
+ public:
+ void run() {
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ s->invokeSafe( "x = 5;" , 0, 0 );
+ {
+ BSONObjBuilder b;
+ s->append( b , "z" , "x" );
+ ASSERT_EQUALS( BSON( "z" << 5 ) , b.obj() );
+ }
+
+ s->invokeSafe( "x = function(){ return 17; }" , 0, 0 );
+ BSONObj temp;
+ {
+ BSONObjBuilder b;
+ s->append( b , "z" , "x" );
+ temp = b.obj();
+ }
+
+ s->invokeSafe( "foo = this.z();" , 0, &temp );
+ ASSERT_EQUALS( 17 , s->getNumber( "foo" ) );
+ }
+ };
+
+ class RenameTest {
+ public:
+ void run() {
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ s->setNumber( "x" , 5 );
+ ASSERT_EQUALS( 5 , s->getNumber( "x" ) );
+ ASSERT_EQUALS( Undefined , s->type( "y" ) );
+
+ s->rename( "x" , "y" );
+ ASSERT_EQUALS( 5 , s->getNumber( "y" ) );
+ ASSERT_EQUALS( Undefined , s->type( "x" ) );
+
+ s->rename( "y" , "x" );
+ ASSERT_EQUALS( 5 , s->getNumber( "x" ) );
+ ASSERT_EQUALS( Undefined , s->type( "y" ) );
+ }
+ };
+
+
+ class All : public Suite {
+ public:
+ All() : Suite( "js" ) {
+ }
+
+ void setupTests() {
+ add< Fundamental >();
+ add< BasicScope >();
+ add< ResetScope >();
+ add< FalseTests >();
+ add< SimpleFunctions >();
+
+ add< ObjectMapping >();
+ add< ObjectDecoding >();
+ add< JSOIDTests >();
+ add< SetImplicit >();
+ add< ObjectModReadonlyTests >();
+ add< OtherJSTypes >();
+ add< SpecialDBTypes >();
+ add< TypeConservation >();
+ add< NumberLong >();
+ add< NumberLong2 >();
+ add< RenameTest >();
+
+ add< WeirdObjects >();
+ add< CodeTests >();
+ add< DBRefTest >();
+ add< InformalDBRef >();
+ add< BinDataType >();
+
+ add< VarTests >();
+
+ add< Speed1 >();
+
+ add< InvalidUTF8Check >();
+ add< Utf8Check >();
+ add< LongUtf8String >();
+
+ add< ScopeOut >();
+ }
+ } myall;
+
+} // namespace JavaJSTests
+
diff --git a/src/mongo/dbtests/macrotests.cpp b/src/mongo/dbtests/macrotests.cpp
new file mode 100644
index 00000000000..f547c851677
--- /dev/null
+++ b/src/mongo/dbtests/macrotests.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef MONGO_EXPOSE_MACROS
+
+#include "../client/dbclient.h"
+
+#ifdef malloc
+# error malloc defined 0
+#endif
+
+#ifdef assert
+# error assert defined 1
+#endif
+
+#include "../client/parallel.h" //uses assert
+
+#ifdef assert
+# error assert defined 2
+#endif
+
+#include "../client/redef_macros.h"
+
+#ifndef assert
+# error assert not defined 3
+#endif
+
+#include "../client/undef_macros.h"
+
+#ifdef assert
+# error assert defined 3
+#endif
+
+
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
new file mode 100644
index 00000000000..380b8b802d4
--- /dev/null
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -0,0 +1,163 @@
+// matchertests.cpp : matcher unit tests
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../util/timer.h"
+
+#include "../db/matcher.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+
+
+
+namespace MatcherTests {
+
+ class Basic {
+ public:
+ void run() {
+ BSONObj query = fromjson( "{\"a\":\"b\"}" );
+ Matcher m( query );
+ ASSERT( m.matches( fromjson( "{\"a\":\"b\"}" ) ) );
+ }
+ };
+
+ class DoubleEqual {
+ public:
+ void run() {
+ BSONObj query = fromjson( "{\"a\":5}" );
+ Matcher m( query );
+ ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
+ }
+ };
+
+ class MixedNumericEqual {
+ public:
+ void run() {
+ BSONObjBuilder query;
+ query.append( "a", 5 );
+ Matcher m( query.done() );
+ ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
+ }
+ };
+
+ class MixedNumericGt {
+ public:
+ void run() {
+ BSONObj query = fromjson( "{\"a\":{\"$gt\":4}}" );
+ Matcher m( query );
+ BSONObjBuilder b;
+ b.append( "a", 5 );
+ ASSERT( m.matches( b.done() ) );
+ }
+ };
+
+ class MixedNumericIN {
+ public:
+ void run() {
+ BSONObj query = fromjson( "{ a : { $in : [4,6] } }" );
+ ASSERT_EQUALS( 4 , query["a"].embeddedObject()["$in"].embeddedObject()["0"].number() );
+ ASSERT_EQUALS( NumberInt , query["a"].embeddedObject()["$in"].embeddedObject()["0"].type() );
+
+ Matcher m( query );
+
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 4.0 );
+ ASSERT( m.matches( b.done() ) );
+ }
+
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 5 );
+ ASSERT( ! m.matches( b.done() ) );
+ }
+
+
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 4 );
+ ASSERT( m.matches( b.done() ) );
+ }
+
+ }
+ };
+
+ class MixedNumericEmbedded {
+ public:
+ void run() {
+ Matcher m( BSON( "a" << BSON( "x" << 1 ) ) );
+ ASSERT( m.matches( BSON( "a" << BSON( "x" << 1 ) ) ) );
+ ASSERT( m.matches( BSON( "a" << BSON( "x" << 1.0 ) ) ) );
+ }
+ };
+
+ class Size {
+ public:
+ void run() {
+ Matcher m( fromjson( "{a:{$size:4}}" ) );
+ ASSERT( m.matches( fromjson( "{a:[1,2,3,4]}" ) ) );
+ ASSERT( !m.matches( fromjson( "{a:[1,2,3]}" ) ) );
+ ASSERT( !m.matches( fromjson( "{a:[1,2,3,'a','b']}" ) ) );
+ ASSERT( !m.matches( fromjson( "{a:[[1,2,3,4]]}" ) ) );
+ }
+ };
+
+
+ class TimingBase {
+ public:
+ long time( const BSONObj& patt , const BSONObj& obj ) {
+ Matcher m( patt );
+ Timer t;
+ for ( int i=0; i<10000; i++ ) {
+ ASSERT( m.matches( obj ) );
+ }
+ return t.millis();
+ }
+ };
+
+ class AllTiming : public TimingBase {
+ public:
+ void run() {
+ long normal = time( BSON( "x" << 5 ) , BSON( "x" << 5 ) );
+ long all = time( BSON( "x" << BSON( "$all" << BSON_ARRAY( 5 ) ) ) , BSON( "x" << 5 ) );
+
+ cout << "normal: " << normal << " all: " << all << endl;
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "matcher" ) {
+ }
+
+ void setupTests() {
+ add< Basic >();
+ add< DoubleEqual >();
+ add< MixedNumericEqual >();
+ add< MixedNumericGt >();
+ add< MixedNumericIN >();
+ add< Size >();
+ add< MixedNumericEmbedded >();
+ add< AllTiming >();
+ }
+ } dball;
+
+} // namespace MatcherTests
+
diff --git a/src/mongo/dbtests/mmaptests.cpp b/src/mongo/dbtests/mmaptests.cpp
new file mode 100644
index 00000000000..7fb6eee98fc
--- /dev/null
+++ b/src/mongo/dbtests/mmaptests.cpp
@@ -0,0 +1,219 @@
+// @file mmaptests.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/mongommf.h"
+#include "../util/timer.h"
+#include "dbtests.h"
+
+namespace MMapTests {
+
+ class LeakTest {
+ const string fn;
+ const int optOld;
+ public:
+ LeakTest() :
+ fn( (path(dbpath) / "testfile.map").string() ), optOld(cmdLine.durOptions)
+ {
+ cmdLine.durOptions = 0; // DurParanoid doesn't make sense with this test
+ }
+ ~LeakTest() {
+ cmdLine.durOptions = optOld;
+ try { boost::filesystem::remove(fn); }
+ catch(...) { }
+ }
+ void run() {
+
+ try { boost::filesystem::remove(fn); }
+ catch(...) { }
+
+ writelock lk;
+
+ {
+ MongoMMF f;
+ unsigned long long len = 256 * 1024 * 1024;
+ assert( f.create(fn, len, /*sequential*/false) );
+ {
+ char *p = (char *) f.getView();
+ assert(p);
+ // write something to the private view as a test
+ if( cmdLine.dur )
+ MemoryMappedFile::makeWritable(p, 6);
+ strcpy(p, "hello");
+ }
+ if( cmdLine.dur ) {
+ char *w = (char *) f.view_write();
+ strcpy(w + 6, "world");
+ }
+ MongoFileFinder ff;
+ ASSERT( ff.findByPath(fn) );
+ ASSERT( ff.findByPath("asdf") == 0 );
+ }
+ {
+ MongoFileFinder ff;
+ ASSERT( ff.findByPath(fn) == 0 );
+ }
+
+ int N = 10000;
+#if !defined(_WIN32) && !defined(__linux__)
+ // seems this test is slow on OS X.
+ N = 100;
+#endif
+
+ // we make a lot here -- if we were leaking, presumably it would fail doing this many.
+ Timer t;
+ for( int i = 0; i < N; i++ ) {
+ MongoMMF f;
+ assert( f.open(fn, i%4==1) );
+ {
+ char *p = (char *) f.getView();
+ assert(p);
+ if( cmdLine.dur )
+ MemoryMappedFile::makeWritable(p, 4);
+ strcpy(p, "zzz");
+ }
+ if( cmdLine.dur ) {
+ char *w = (char *) f.view_write();
+ if( i % 2 == 0 )
+ ++(*w);
+ assert( w[6] == 'w' );
+ }
+ }
+ if( t.millis() > 10000 ) {
+ log() << "warning: MMap LeakTest is unusually slow N:" << N << ' ' << t.millis() << "ms" << endl;
+ }
+
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "mmap" ) {}
+ void setupTests() {
+ add< LeakTest >();
+ }
+ } myall;
+
+#if 0
+
+ class CopyOnWriteSpeedTest {
+ public:
+ void run() {
+
+ string fn = "/tmp/testfile.map";
+ boost::filesystem::remove(fn);
+
+ MemoryMappedFile f;
+ char *p = (char *) f.create(fn, 1024 * 1024 * 1024, true);
+ assert(p);
+ strcpy(p, "hello");
+
+ {
+ void *x = f.testGetCopyOnWriteView();
+ Timer tt;
+ for( int i = 11; i < 1000000000; i++ )
+ p[i] = 'z';
+ cout << "fill 1GB time: " << tt.millis() << "ms" << endl;
+ f.testCloseCopyOnWriteView(x);
+ }
+
+ /* test a lot of view/unviews */
+ {
+ Timer t;
+
+ char *q;
+ for( int i = 0; i < 1000; i++ ) {
+ q = (char *) f.testGetCopyOnWriteView();
+ assert( q );
+ if( i == 999 ) {
+ strcpy(q+2, "there");
+ }
+ f.testCloseCopyOnWriteView(q);
+ }
+
+ cout << "view unview: " << t.millis() << "ms" << endl;
+ }
+
+ f.flush(true);
+
+ /* plain old mmaped writes */
+ {
+ Timer t;
+ for( int i = 0; i < 10; i++ ) {
+ memset(p+100, 'c', 200 * 1024 * 1024);
+ }
+ cout << "traditional writes: " << t.millis() << "ms" << endl;
+ }
+
+ f.flush(true);
+
+ /* test doing some writes */
+ {
+ Timer t;
+ char *q = (char *) f.testGetCopyOnWriteView();
+ for( int i = 0; i < 10; i++ ) {
+ assert( q );
+ memset(q+100, 'c', 200 * 1024 * 1024);
+ }
+ f.testCloseCopyOnWriteView(q);
+
+ cout << "inc style some writes: " << t.millis() << "ms" << endl;
+ }
+
+ /* test doing some writes */
+ {
+ Timer t;
+ for( int i = 0; i < 10; i++ ) {
+ char *q = (char *) f.testGetCopyOnWriteView();
+ assert( q );
+ memset(q+100, 'c', 200 * 1024 * 1024);
+ f.testCloseCopyOnWriteView(q);
+ }
+
+ cout << "some writes: " << t.millis() << "ms" << endl;
+ }
+
+ /* more granular */
+ {
+ Timer t;
+ for( int i = 0; i < 100; i++ ) {
+ char *q = (char *) f.testGetCopyOnWriteView();
+ assert( q );
+ memset(q+100, 'c', 20 * 1024 * 1024);
+ f.testCloseCopyOnWriteView(q);
+ }
+
+ cout << "more granular some writes: " << t.millis() << "ms" << endl;
+ }
+
+ p[10] = 0;
+ cout << p << endl;
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "mmap" ) {}
+ void setupTests() {
+ add< CopyOnWriteSpeedTest >();
+ }
+ } myall;
+
+#endif
+
+}
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
new file mode 100644
index 00000000000..792baf2ccfa
--- /dev/null
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -0,0 +1,1244 @@
+// namespacetests.cpp : namespace.{h,cpp} unit tests.
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// Where IndexDetails defined.
+#include "pch.h"
+#include "../db/namespace.h"
+
+#include "../db/db.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+
+namespace NamespaceTests {
+
+ const int MinExtentSize = 4096;
+
+ namespace IndexDetailsTests {
+ class Base {
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base() : _context(ns()) {
+ }
+ virtual ~Base() {
+ if ( id_.info.isNull() )
+ return;
+ theDataFileMgr.deleteRecord( ns(), id_.info.rec(), id_.info );
+ ASSERT( theDataFileMgr.findAll( ns() )->eof() );
+ }
+ protected:
+ void create( bool sparse = false ) {
+ NamespaceDetailsTransient::get( ns() ).deletedIndex();
+ BSONObjBuilder builder;
+ builder.append( "ns", ns() );
+ builder.append( "name", "testIndex" );
+ builder.append( "key", key() );
+ builder.append( "sparse", sparse );
+ BSONObj bobj = builder.done();
+ id_.info = theDataFileMgr.insert( ns(), bobj.objdata(), bobj.objsize() );
+ // head not needed for current tests
+ // idx_.head = BtreeBucket::addHead( id_ );
+ }
+ static const char* ns() {
+ return "unittests.indexdetailstests";
+ }
+ IndexDetails& id() {
+ return id_;
+ }
+ virtual BSONObj key() const {
+ BSONObjBuilder k;
+ k.append( "a", 1 );
+ return k.obj();
+ }
+ BSONObj aDotB() const {
+ BSONObjBuilder k;
+ k.append( "a.b", 1 );
+ return k.obj();
+ }
+ BSONObj aAndB() const {
+ BSONObjBuilder k;
+ k.append( "a", 1 );
+ k.append( "b", 1 );
+ return k.obj();
+ }
+ static vector< int > shortArray() {
+ vector< int > a;
+ a.push_back( 1 );
+ a.push_back( 2 );
+ a.push_back( 3 );
+ return a;
+ }
+ static BSONObj simpleBC( int i ) {
+ BSONObjBuilder b;
+ b.append( "b", i );
+ b.append( "c", 4 );
+ return b.obj();
+ }
+ static void checkSize( int expected, const BSONObjSet &objs ) {
+ ASSERT_EQUALS( BSONObjSet::size_type( expected ), objs.size() );
+ }
+ static void assertEquals( const BSONObj &a, const BSONObj &b ) {
+ if ( a.woCompare( b ) != 0 ) {
+ out() << "expected: " << a.toString()
+ << ", got: " << b.toString() << endl;
+ }
+ ASSERT( a.woCompare( b ) == 0 );
+ }
+ BSONObj nullObj() const {
+ BSONObjBuilder b;
+ b.appendNull( "" );
+ return b.obj();
+ }
+ private:
+ dblock lk_;
+ IndexDetails id_;
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT_EQUALS( "testIndex", id().indexName() );
+ ASSERT_EQUALS( ns(), id().parentNS() );
+ assertEquals( key(), id().keyPattern() );
+ }
+ };
+
+ class GetKeysFromObjectSimple : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b, e;
+ b.append( "b", 4 );
+ b.append( "a", 5 );
+ e.append( "", 5 );
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 1, keys );
+ assertEquals( e.obj(), *keys.begin() );
+ }
+ };
+
+ class GetKeysFromObjectDotted : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder a, e, b;
+ b.append( "b", 4 );
+ a.append( "a", b.done() );
+ a.append( "c", "foo" );
+ e.append( "", 4 );
+ BSONObjSet keys;
+ id().getKeysFromObject( a.done(), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( e.obj(), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class GetKeysFromArraySimple : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "a", shortArray()) ;
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ };
+
+ class GetKeysFromArrayFirstElement : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "a", shortArray() );
+ b.append( "b", 2 );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ b.append( "", 2 );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aAndB();
+ }
+ };
+
+ class GetKeysFromArraySecondElement : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "first", 5 );
+ b.append( "a", shortArray()) ;
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", 5 );
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ BSONObjBuilder k;
+ k.append( "first", 1 );
+ k.append( "a", 1 );
+ return k.obj();
+ }
+ };
+
+ class GetKeysFromSecondLevelArray : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "b", shortArray() );
+ BSONObjBuilder a;
+ a.append( "a", b.done() );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( a.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ParallelArraysBasic : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "a", shortArray() );
+ b.append( "b", shortArray() );
+
+ BSONObjSet keys;
+ ASSERT_THROWS( id().getKeysFromObject( b.done(), keys ),
+ UserException );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aAndB();
+ }
+ };
+
+ class ArraySubobjectBasic : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( simpleBC( i ) );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ArraySubobjectMultiFieldIndex : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( simpleBC( i ) );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+ b.append( "d", 99 );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder c;
+ c.append( "", j );
+ c.append( "", 99 );
+ assertEquals( c.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ BSONObjBuilder k;
+ k.append( "a.b", 1 );
+ k.append( "d", 1 );
+ return k.obj();
+ }
+ };
+
+ class ArraySubobjectSingleMissing : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ BSONObjBuilder s;
+ s.append( "foo", 41 );
+ elts.push_back( s.obj() );
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( simpleBC( i ) );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+ BSONObj obj = b.obj();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( obj, keys );
+ checkSize( 4, keys );
+ BSONObjSet::iterator i = keys.begin();
+ assertEquals( nullObj(), *i++ ); // see SERVER-3377
+ for ( int j = 1; j < 4; ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ArraySubobjectMissing : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ BSONObjBuilder s;
+ s.append( "foo", 41 );
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( s.done() );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 1, keys );
+ assertEquals( nullObj(), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class MissingField : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( BSON( "b" << 1 ), keys );
+ checkSize( 1, keys );
+ assertEquals( nullObj(), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return BSON( "a" << 1 );
+ }
+ };
+
+ class SubobjectMissing : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
+ checkSize( 1, keys );
+ assertEquals( nullObj(), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class CompoundMissing : public Base {
+ public:
+ void run() {
+ create();
+
+ {
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{x:'a',y:'b'}" ) , keys );
+ checkSize( 1 , keys );
+ assertEquals( BSON( "" << "a" << "" << "b" ) , *keys.begin() );
+ }
+
+ {
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{x:'a'}" ) , keys );
+ checkSize( 1 , keys );
+ BSONObjBuilder b;
+ b.append( "" , "a" );
+ b.appendNull( "" );
+ assertEquals( b.obj() , *keys.begin() );
+ }
+
+ }
+
+ private:
+ virtual BSONObj key() const {
+ return BSON( "x" << 1 << "y" << 1 );
+ }
+
+ };
+
+ class ArraySubelementComplex : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:[2]}]}" ), keys );
+ checkSize( 1, keys );
+ assertEquals( BSON( "" << 2 ), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ParallelArraysComplex : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ ASSERT_THROWS( id().getKeysFromObject( fromjson( "{a:[{b:[1],c:[2]}]}" ), keys ),
+ UserException );
+ }
+ private:
+ virtual BSONObj key() const {
+ return fromjson( "{'a.b':1,'a.c':1}" );
+ }
+ };
+
+ class AlternateMissing : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:1},{c:2}]}" ), keys );
+ checkSize( 2, keys );
+ BSONObjSet::iterator i = keys.begin();
+ {
+ BSONObjBuilder e;
+ e.appendNull( "" );
+ e.append( "", 2 );
+ assertEquals( e.obj(), *i++ );
+ }
+
+ {
+ BSONObjBuilder e;
+ e.append( "", 1 );
+ e.appendNull( "" );
+ assertEquals( e.obj(), *i++ );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return fromjson( "{'a.b':1,'a.c':1}" );
+ }
+ };
+
+ class MultiComplex : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:1},{b:[1,2,3]}]}" ), keys );
+ checkSize( 3, keys );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class EmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
+ checkSize(2, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize(1, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:null}" ), keys );
+ checkSize(1, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize(1, keys );
+ ASSERT_EQUALS( Undefined, keys.begin()->firstElement().type() );
+ keys.clear();
+ }
+ };
+
+ class DoubleArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
+ checkSize(2, keys );
+ BSONObjSet::const_iterator i = keys.begin();
+ ASSERT_EQUALS( BSON( "" << 1 << "" << 1 ), *i );
+ ++i;
+ ASSERT_EQUALS( BSON( "" << 2 << "" << 2 ), *i );
+ keys.clear();
+ }
+
+ protected:
+ BSONObj key() const {
+ return BSON( "a" << 1 << "a" << 1 );
+ }
+ };
+
+ class DoubleEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize(1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined,'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+
+ protected:
+ BSONObj key() const {
+ return BSON( "a" << 1 << "a" << 1 );
+ }
+ };
+
+ class MultiEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:1,b:[1,2]}" ), keys );
+ checkSize(2, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:1,b:[1]}" ), keys );
+ checkSize(1, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:1,b:null}" ), keys );
+ //cout << "YO : " << *(keys.begin()) << endl;
+ checkSize(1, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:1,b:[]}" ), keys );
+ checkSize(1, keys );
+ //cout << "YO : " << *(keys.begin()) << endl;
+ BSONObjIterator i( *keys.begin() );
+ ASSERT_EQUALS( NumberInt , i.next().type() );
+ ASSERT_EQUALS( Undefined , i.next().type() );
+ keys.clear();
+ }
+
+ protected:
+ BSONObj key() const {
+ return aAndB();
+ }
+ };
+
+ class NestedEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ class MultiNestedEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null,'':null}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 << "a.c" << 1 ); }
+ };
+
+ // Compound index mixing a whole-field component 'a' with a deeper dotted
+ // component 'a.b': the two components get different empty-array encodings
+ // (undefined for the array component that was emptied, null for a path that
+ // simply resolved to nothing).
+ class UnevenNestedEmptyArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ // Empty array: 'a' element -> undefined, missing 'a.b' -> null.
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined,'':null}" ), *keys.begin() );
+ keys.clear();
+
+ // One subobject element: 'a' -> the subobject itself, 'a.b' -> 1.
+ id().getKeysFromObject( fromjson( "{a:[{b:1}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':{b:1},'':1}" ), *keys.begin() );
+ keys.clear();
+
+ // Nested empty array at 'b' -> undefined in the 'a.b' component.
+ id().getKeysFromObject( fromjson( "{a:[{b:[]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':{b:[]},'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a" << 1 << "a.b" << 1 ); }
+ };
+
+ // Same as UnevenNestedEmptyArray but with the key components in the opposite
+ // order, confirming the null/undefined encodings follow the components rather
+ // than their positions.
+ class ReverseUnevenNestedEmptyArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null,'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 << "a" << 1 ); }
+ };
+
+ // Sparse variant of ReverseUnevenNestedEmptyArray: even with a sparse index
+ // (create(true)), an empty array still produces one {null, undefined} key —
+ // presumably because the 'a' component is considered present. TODO confirm
+ // against the sparse-index key generation rules.
+ class SparseReverseUnevenNestedEmptyArray : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null,'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 << "a" << 1 ); }
+ };
+
+ // A sparse index on the dotted path 'a.b' generates no keys at all when the
+ // path is entirely absent: scalar 'a', empty array, or array of subobjects
+ // lacking 'b'.
+ class SparseEmptyArray : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:1}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{c:1}]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ // Identical scenario to SparseEmptyArray, but with the sparse dotted path
+ // 'a.b' as the *second* component of a compound key: every document whose
+ // 'a.b' path is absent (scalar 'a', empty array, subobjects without 'b')
+ // generates no keys.
+ class SparseEmptyArraySecond : public Base {
+ public:
+     void run() {
+         create( true ); // sparse index
+
+         const char *docs[] = { "{a:1}", "{a:[]}", "{a:[{c:1}]}" };
+         for ( unsigned j = 0; j < sizeof( docs ) / sizeof( docs[ 0 ] ); ++j ) {
+             BSONObjSet generated;
+             id().getKeysFromObject( fromjson( docs[ j ] ), generated );
+             checkSize( 0, generated );
+         }
+     }
+ protected:
+     BSONObj key() const { return BSON( "z" << 1 << "a.b" << 1 ); }
+ };
+
+ // Non-sparse index on 'a.b': array elements that are not objects (and thus
+ // cannot contain 'b') contribute a null key, and a mixed array produces both
+ // the null key and the real value key.
+ class NonObjectMissingNestedField : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ // Mixed array: one null key (for the scalar 1) plus one key for {b:1}.
+ // BSONObjSet ordering puts null before 1.
+ id().getKeysFromObject( fromjson( "{a:[1,{b:1}]}" ), keys );
+ checkSize( 2, keys );
+ BSONObjSet::const_iterator c = keys.begin();
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *c );
+ ++c;
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *c );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ // Sparse counterpart of NonObjectMissingNestedField: the null keys for
+ // non-object array elements are suppressed entirely; only the element that
+ // actually has 'b' produces a key.
+ class SparseNonObjectMissingNestedField : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1,{b:1}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ // Index on the numeric path 'a.0': '0' is treated both as an array position
+ // and as a literal field name, and documents that make the two readings
+ // ambiguous (an array element that itself has a field named '0') are rejected
+ // with a UserException.
+ class IndexedArrayIndex : public Base {
+ public:
+ void run() {
+ create();
+
+ // '0' resolves as an array index.
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( BSON( "" << 1 ), *keys.begin() );
+ keys.clear();
+
+ // First element is itself an array: it is indexed as a whole value.
+ id().getKeysFromObject( fromjson( "{a:[[1]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':[1]}" ), *keys.begin() );
+ keys.clear();
+
+ // First element is an empty array -> undefined key.
+ id().getKeysFromObject( fromjson( "{a:[[]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined}" ), *keys.begin() );
+ keys.clear();
+
+ // '0' also resolves as a plain object field name.
+ id().getKeysFromObject( fromjson( "{a:{'0':1}}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( BSON( "" << 1 ), *keys.begin() );
+ keys.clear();
+
+ // Ambiguous: array whose element has a literal field '0'.
+ ASSERT_THROWS( id().getKeysFromObject( fromjson( "{a:[{'0':1}]}" ), keys ), UserException );
+
+ ASSERT_THROWS( id().getKeysFromObject( fromjson( "{a:[1,{'0':2}]}" ), keys ), UserException );
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0" << 1 ); }
+ };
+
+ // Index on the doubly-numeric path 'a.0.0': two levels of positional lookup,
+ // with missing positions encoding as null and an empty array found at the
+ // final position encoding as undefined.
+ class DoubleIndexedArrayIndex : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[[1]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ // Inner array empty: path resolves to nothing -> null.
+ id().getKeysFromObject( fromjson( "{a:[[]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ // Path resolves to an empty array -> undefined.
+ id().getKeysFromObject( fromjson( "{a:[[[]]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0.0" << 1 ); }
+ };
+
+ // Index on 'a.0.b': the positional component may traverse either one or two
+ // array levels, and a 'b' value that is itself an array is unwound one level
+ // (so [1] keys as 1, [[1]] keys as [1], [] keys as undefined).
+ class ObjectWithinArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:1}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{b:[1]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{b:[[1]]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':[1]}" ), *keys.begin() );
+ keys.clear();
+
+ // Same expectations with the subobject nested one array level deeper.
+ id().getKeysFromObject( fromjson( "{a:[[{b:1}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[{b:[1]}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[{b:[[1]]}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':[1]}" ), *keys.begin() );
+ keys.clear();
+
+ // Empty array at 'b' -> undefined.
+ id().getKeysFromObject( fromjson( "{a:[[{b:[]}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0.b" << 1 ); }
+ };
+
+ // Index on 'a.0.b.0': positional components on both sides of the field name
+ // resolve through the nested arrays to the scalar.
+ class ArrayWithinObjectWithinArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:[1]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0.b.0" << 1 ); }
+ };
+
+ // also test numeric string field names
+
+ } // namespace IndexDetailsTests
+
+ namespace NamespaceDetailsTests {
+
+ // Fixture for NamespaceDetails tests: takes the global write lock and a
+ // Client::Context for the test namespace, creates a small capped collection,
+ // and drops it on destruction. Helpers walk the on-disk extent/record chains
+ // directly.
+ class Base {
+ const char *ns_;
+ // Global write lock held for the lifetime of the fixture; must be
+ // constructed before _context.
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
+ virtual ~Base() {
+ // Drop the collection only if create() actually ran.
+ if ( !nsd() )
+ return;
+ string s( ns() );
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( s, errmsg, result );
+ }
+ protected:
+ // Create the collection from spec(); asserts on failure.
+ void create() {
+ dblock lk;
+ string err;
+ ASSERT( userCreateNS( ns(), fromjson( spec() ), err, false ) );
+ }
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":1}";
+ }
+ // Count records by walking every extent's record chain, and cross-check
+ // against the stats the namespace maintains.
+ int nRecords() const {
+ int count = 0;
+ for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext ) {
+ int fileNo = i.ext()->firstRecord.a();
+ if ( fileNo == -1 )
+ continue;
+ for ( int j = i.ext()->firstRecord.getOfs(); j != DiskLoc::NullOfs;
+ j = DiskLoc( fileNo, j ).rec()->nextOfs ) {
+ ++count;
+ }
+ }
+ ASSERT_EQUALS( count, nsd()->stats.nrecords );
+ return count;
+ }
+ int nExtents() const {
+ int count = 0;
+ for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext )
+ ++count;
+ return count;
+ }
+ static int min( int a, int b ) {
+ return a < b ? a : b;
+ }
+ const char *ns() const {
+ return ns_;
+ }
+ // Returns the namespace details pre-marked for writing (durability).
+ NamespaceDetails *nsd() const {
+ return nsdetails( ns() )->writingWithExtra();
+ }
+ // A ~200-byte filler document; optionally with a generated _id.
+ static BSONObj bigObj(bool bGenID=false) {
+ BSONObjBuilder b;
+ if (bGenID)
+ b.appendOID("_id", 0, true);
+ string as( 187, 'a' );
+ b.append( "a", as );
+ return b.obj();
+ }
+ };
+
+ // A freshly created capped collection is empty, its cap extent is the first
+ // extent, and capFirstNewRecord starts out in the 'invalid' sentinel state.
+ class Create : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT( nsd() );
+ ASSERT_EQUALS( 0, nRecords() );
+ ASSERT( nsd()->firstExtent == nsd()->capExtent );
+ DiskLoc initial = DiskLoc();
+ initial.setInvalid();
+ ASSERT( initial == nsd()->capFirstNewRecord );
+ }
+ };
+
+ // A single insert into the capped collection succeeds and is visible via the
+ // on-disk record walk.
+ class SingleAlloc : public Base {
+ public:
+ void run() {
+ create();
+ BSONObj b = bigObj();
+ ASSERT( !theDataFileMgr.insert( ns(), b.objdata(), b.objsize() ).isNull() );
+ ASSERT_EQUALS( 1, nRecords() );
+ }
+ };
+
+ // Repeated inserts into a small capped collection wrap around: the record
+ // count never exceeds the collection's capacity Q, and once the collection
+ // is full each insert reuses the disk location freed Q inserts earlier.
+ class Realloc : public Base {
+ public:
+     void run() {
+         create();
+
+         const int N = 20;
+         // Q is the capacity in records; it depends on the size of the bson
+         // object and the extent size allocated by the system.
+         const int Q = 16;
+         DiskLoc l[ N ];
+         for ( int i = 0; i < N; ++i ) {
+             BSONObj b = bigObj(true);
+             l[ i ] = theDataFileMgr.insert( ns(), b.objdata(), b.objsize() );
+             ASSERT( !l[ i ].isNull() );
+             ASSERT( nRecords() <= Q );
+             // Once the collection has wrapped, allocations cycle with period Q.
+             // (was a hard-coded 16, which silently duplicated Q)
+             if ( i >= Q )
+                 ASSERT( l[ i ] == l[ i - Q ] );
+         }
+     }
+ };
+
+ // Capped collection with two extents: eight small inserts all fit, and an
+ // object larger than the maximum extent fails AND (capped semantics) empties
+ // the collection of the records it would have displaced.
+ class TwoExtent : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+
+ BSONObj b = bigObj();
+
+ DiskLoc l[ 8 ];
+ for ( int i = 0; i < 8; ++i ) {
+ l[ i ] = theDataFileMgr.insert( ns(), b.objdata(), b.objsize() );
+ ASSERT( !l[ i ].isNull() );
+ //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ //if ( i > 3 )
+ // ASSERT( l[ i ] == l[ i - 4 ] );
+ }
+ ASSERT( nRecords() == 8 );
+
+ // Too big: insert fails, returning a null DiskLoc, and the collection
+ // ends up empty.
+ BSONObjBuilder bob;
+ bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
+ BSONObj bigger = bob.done();
+ ASSERT( theDataFileMgr.insert( ns(), bigger.objdata(), bigger.objsize() ).isNull() );
+ ASSERT_EQUALS( 0, nRecords() );
+ }
+ private:
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ }
+ };
+
+ /* test NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc loc)
+ */
+ /* test NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc loc):
+ fill a two-extent capped collection past wraparound, truncate after a
+ record near the end, and verify the head survives while the old tail is
+ deleted. */
+ class TruncateCapped : public Base {
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ }
+ void pass(int p) {
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+
+ BSONObj b = bigObj(true);
+
+ // Enough inserts to overflow both extents and wrap; truncate 4 short
+ // of the last insert.
+ int N = MinExtentSize / b.objsize() * nExtents() + 5;
+ int T = N - 4;
+
+ DiskLoc truncAt;
+ //DiskLoc l[ 8 ];
+ for ( int i = 0; i < N; ++i ) {
+ BSONObj bb = bigObj(true);
+ DiskLoc a = theDataFileMgr.insert( ns(), bb.objdata(), bb.objsize() );
+ if( T == i )
+ truncAt = a;
+ ASSERT( !a.isNull() );
+ /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ if ( i > 3 )
+ ASSERT( l[ i ] == l[ i - 4 ] );*/
+ }
+ // Wraparound must have discarded some records.
+ ASSERT( nRecords() < N );
+
+ NamespaceDetails *nsd = nsdetails(ns());
+
+ // Remember the current head and tail before truncating.
+ DiskLoc last, first;
+ {
+ ReverseCappedCursor c(nsd);
+ last = c.currLoc();
+ ASSERT( !last.isNull() );
+ }
+ {
+ ForwardCappedCursor c(nsd);
+ first = c.currLoc();
+ ASSERT( !first.isNull() );
+ ASSERT( first != last ) ;
+ }
+
+ nsd->cappedTruncateAfter(ns(), truncAt, false);
+ // NOTE(review): 28 is the expected survivor count for these exact
+ // object/extent sizes — it will change if bigObj() or MinExtentSize does.
+ ASSERT_EQUALS( nsd->stats.nrecords , 28 );
+
+ {
+ ForwardCappedCursor c(nsd);
+ ASSERT( first == c.currLoc() );
+ }
+ {
+ ReverseCappedCursor c(nsd);
+ ASSERT( last != c.currLoc() ); // old last should be deleted
+ ASSERT( !last.isNull() );
+ }
+
+ // Too big
+ BSONObjBuilder bob;
+ bob.appendOID("_id", 0, true);
+ bob.append( "a", string( MinExtentSize + 300, 'a' ) );
+ BSONObj bigger = bob.done();
+ ASSERT( theDataFileMgr.insert( ns(), bigger.objdata(), bigger.objsize() ).isNull() );
+ ASSERT_EQUALS( 0, nRecords() );
+ }
+ public:
+ void run() {
+// log() << "******** NOT RUNNING TruncateCapped test yet ************" << endl;
+ pass(0);
+ }
+ };
+
+ // Simulates an old-format capped collection (pre-migration deleted-record
+ // bookkeeping, zeroed capExtent/capFirstNewRecord) by mutating the on-disk
+ // structures directly, then verifies that nsd() — which presumably triggers
+ // the migration path — restores a consistent capped state with all 10
+ // deleted records accounted for.
+ class Migrate : public Base {
+ public:
+ void run() {
+ create();
+ // Splice part of the deleted-record chain into deletedList[2] and cut
+ // it out of the capped free list, mimicking legacy layout.
+ nsd()->deletedList[ 2 ] = nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted.drec()->nextDeleted;
+ nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted.drec()->nextDeleted.writing() = DiskLoc();
+ nsd()->cappedLastDelRecLastExtent().Null();
+ NamespaceDetails *d = nsd();
+ zero( &d->capExtent );
+ zero( &d->capFirstNewRecord );
+
+ // Re-fetch; this is where the migration is expected to happen.
+ nsd();
+
+ ASSERT( nsd()->firstExtent == nsd()->capExtent );
+ ASSERT( nsd()->capExtent.getOfs() != 0 );
+ ASSERT( !nsd()->capFirstNewRecord.isValid() );
+ // All deleted records must be back on one chain.
+ int nDeleted = 0;
+ for ( DiskLoc i = nsd()->cappedListOfAllDeletedRecords(); !i.isNull(); i = i.drec()->nextDeleted, ++nDeleted );
+ ASSERT_EQUALS( 10, nDeleted );
+ ASSERT( nsd()->cappedLastDelRecLastExtent().isNull() );
+ }
+ private:
+ static void zero( DiskLoc *d ) {
+ memset( d, 0, sizeof( DiskLoc ) );
+ }
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":10}";
+ }
+ };
+
+ // This isn't a particularly useful test, and because it doesn't clean up
+ // after itself, /tmp/unittest needs to be cleared after running.
+ // class BigCollection : public Base {
+ // public:
+ // BigCollection() : Base( "NamespaceDetailsTests_BigCollection" ) {}
+ // void run() {
+ // create();
+ // ASSERT_EQUALS( 2, nExtents() );
+ // }
+ // private:
+ // virtual string spec() const {
+ // // NOTE 256 added to size in _userCreateNS()
+ // long long big = MongoDataFile::maxSize() - DataFileHeader::HeaderSize;
+ // stringstream ss;
+ // ss << "{\"capped\":true,\"size\":" << big << "}";
+ // return ss.str();
+ // }
+ // };
+
+ // Guards the on-disk layout: NamespaceDetails is memory-mapped, so its size
+ // must never change (496 bytes).
+ class Size {
+ public:
+ void run() {
+ ASSERT_EQUALS( 496U, sizeof( NamespaceDetails ) );
+ }
+ };
+
+ } // namespace NamespaceDetailsTests
+
+ // Registers every IndexDetails and NamespaceDetails test under the
+ // "namespace" suite.
+ class All : public Suite {
+ public:
+ All() : Suite( "namespace" ) {
+ }
+
+ void setupTests() {
+ add< IndexDetailsTests::Create >();
+ add< IndexDetailsTests::GetKeysFromObjectSimple >();
+ add< IndexDetailsTests::GetKeysFromObjectDotted >();
+ add< IndexDetailsTests::GetKeysFromArraySimple >();
+ add< IndexDetailsTests::GetKeysFromArrayFirstElement >();
+ add< IndexDetailsTests::GetKeysFromArraySecondElement >();
+ add< IndexDetailsTests::GetKeysFromSecondLevelArray >();
+ add< IndexDetailsTests::ParallelArraysBasic >();
+ add< IndexDetailsTests::ArraySubobjectBasic >();
+ add< IndexDetailsTests::ArraySubobjectMultiFieldIndex >();
+ add< IndexDetailsTests::ArraySubobjectSingleMissing >();
+ add< IndexDetailsTests::ArraySubobjectMissing >();
+ add< IndexDetailsTests::ArraySubelementComplex >();
+ add< IndexDetailsTests::ParallelArraysComplex >();
+ add< IndexDetailsTests::AlternateMissing >();
+ add< IndexDetailsTests::MultiComplex >();
+ add< IndexDetailsTests::EmptyArray >();
+ add< IndexDetailsTests::DoubleArray >();
+ add< IndexDetailsTests::DoubleEmptyArray >();
+ add< IndexDetailsTests::MultiEmptyArray >();
+ add< IndexDetailsTests::NestedEmptyArray >();
+ add< IndexDetailsTests::MultiNestedEmptyArray >();
+ add< IndexDetailsTests::UnevenNestedEmptyArray >();
+ add< IndexDetailsTests::ReverseUnevenNestedEmptyArray >();
+ add< IndexDetailsTests::SparseReverseUnevenNestedEmptyArray >();
+ add< IndexDetailsTests::SparseEmptyArray >();
+ add< IndexDetailsTests::SparseEmptyArraySecond >();
+ add< IndexDetailsTests::NonObjectMissingNestedField >();
+ add< IndexDetailsTests::SparseNonObjectMissingNestedField >();
+ add< IndexDetailsTests::IndexedArrayIndex >();
+ add< IndexDetailsTests::DoubleIndexedArrayIndex >();
+ add< IndexDetailsTests::ObjectWithinArray >();
+ add< IndexDetailsTests::ArrayWithinObjectWithinArray >();
+ add< IndexDetailsTests::MissingField >();
+ add< IndexDetailsTests::SubobjectMissing >();
+ add< IndexDetailsTests::CompoundMissing >();
+ add< NamespaceDetailsTests::Create >();
+ add< NamespaceDetailsTests::SingleAlloc >();
+ add< NamespaceDetailsTests::Realloc >();
+ add< NamespaceDetailsTests::TwoExtent >();
+ add< NamespaceDetailsTests::TruncateCapped >();
+ add< NamespaceDetailsTests::Migrate >();
+ // add< NamespaceDetailsTests::BigCollection >();
+ add< NamespaceDetailsTests::Size >();
+ }
+ } myall;
+} // namespace NamespaceTests
+
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
new file mode 100644
index 00000000000..e07ccb42aa6
--- /dev/null
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -0,0 +1,407 @@
+// pdfiletests.cpp : pdfile unit tests.
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/pdfile.h"
+
+#include "../db/db.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+
+namespace PdfileTests {
+
+ namespace ScanCapped {
+
+ // Fixture for capped-collection scan tests: creates a capped collection with
+ // a configurable extent count, lets a subclass lay out records (often in
+ // hand-picked extents), then verifies forward ($natural:1) and reverse
+ // ($natural:-1) table scans visit exactly count() records in insertion order.
+ class Base {
+ public:
+ Base() : _context( ns() ) {
+ }
+ virtual ~Base() {
+ if ( !nsd() )
+ return;
+ string n( ns() );
+ dropNS( n );
+ }
+ void run() {
+ stringstream spec;
+ spec << "{\"capped\":true,\"size\":2000,\"$nExtents\":" << nExtents() << "}";
+ string err;
+ ASSERT( userCreateNS( ns(), fromjson( spec.str() ), err, false ) );
+ prepare();
+ // Forward scan: records numbered 0..count()-1 in order.
+ int j = 0;
+ for ( boost::shared_ptr<Cursor> i = theDataFileMgr.findAll( ns() );
+ i->ok(); i->advance(), ++j )
+ ASSERT_EQUALS( j, i->current().firstElement().number() );
+ ASSERT_EQUALS( count(), j );
+
+ // Reverse scan: same records, opposite order.
+ j = count() - 1;
+ for ( boost::shared_ptr<Cursor> i =
+ findTableScan( ns(), fromjson( "{\"$natural\":-1}" ) );
+ i->ok(); i->advance(), --j )
+ ASSERT_EQUALS( j, i->current().firstElement().number() );
+ ASSERT_EQUALS( -1, j );
+ }
+ protected:
+ // Subclass hook: populate the collection before scanning.
+ virtual void prepare() = 0;
+ // Subclass hook: number of records prepare() left visible.
+ virtual int count() const = 0;
+ virtual int nExtents() const {
+ return 0;
+ }
+ // bypass standard alloc/insert routines to use the extent we want.
+ // Appends {a:i} at the tail of the given extent's record chain, doing the
+ // record-header and chain bookkeeping (with durability writes) by hand.
+ static DiskLoc insert( DiskLoc ext, int i ) {
+ BSONObjBuilder b;
+ b.append( "a", i );
+ BSONObj o = b.done();
+ int len = o.objsize();
+ Extent *e = ext.ext();
+ e = getDur().writing(e);
+ int ofs;
+ if ( e->lastRecord.isNull() )
+ ofs = ext.getOfs() + ( e->_extentData - (char *)e );
+ else
+ ofs = e->lastRecord.getOfs() + e->lastRecord.rec()->lengthWithHeaders;
+ DiskLoc dl( ext.a(), ofs );
+ Record *r = dl.rec();
+ r = (Record*) getDur().writingPtr(r, Record::HeaderSize + len);
+ r->lengthWithHeaders = Record::HeaderSize + len;
+ r->extentOfs = e->myLoc.getOfs();
+ r->nextOfs = DiskLoc::NullOfs;
+ r->prevOfs = e->lastRecord.isNull() ? DiskLoc::NullOfs : e->lastRecord.getOfs();
+ memcpy( r->data, o.objdata(), len );
+ if ( e->firstRecord.isNull() )
+ e->firstRecord = dl;
+ else
+ getDur().writingInt(e->lastRecord.rec()->nextOfs) = ofs;
+ e->lastRecord = dl;
+ return dl;
+ }
+ static const char *ns() {
+ return "unittests.ScanCapped";
+ }
+ static NamespaceDetails *nsd() {
+ return nsdetails( ns() );
+ }
+ private:
+ dblock lk_;
+ Client::Context _context;
+ };
+
+ // Scanning an empty capped collection visits no records.
+ class Empty : public Base {
+ virtual void prepare() {}
+ virtual int count() const {
+ return 0;
+ }
+ };
+
+ // Empty collection whose capFirstNewRecord is reset to a null DiskLoc
+ // (the "looped" state) still scans as empty.
+ class EmptyLooped : public Base {
+ virtual void prepare() {
+ nsd()->writingWithExtra()->capFirstNewRecord = DiskLoc();
+ }
+ virtual int count() const {
+ return 0;
+ }
+ };
+
+ // Same looped-but-empty state as EmptyLooped, across three extents.
+ class EmptyMultiExtentLooped : public Base {
+ virtual void prepare() {
+ nsd()->writingWithExtra()->capFirstNewRecord = DiskLoc();
+ }
+ virtual int count() const {
+ return 0;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ // One record, which is also the first "new" capped record.
+ class Single : public Base {
+ virtual void prepare() {
+ nsd()->writingWithExtra()->capFirstNewRecord = insert( nsd()->capExtent, 0 );
+ }
+ virtual int count() const {
+ return 1;
+ }
+ };
+
+ // capFirstNewRecord points at the first of two records in the cap extent.
+ class NewCapFirst : public Base {
+ virtual void prepare() {
+ DiskLoc x = insert( nsd()->capExtent, 0 );
+ nsd()->writingWithExtra()->capFirstNewRecord = x;
+ insert( nsd()->capExtent, 1 );
+ }
+ virtual int count() const {
+ return 2;
+ }
+ };
+
+ // capFirstNewRecord points at the last of two records in the cap extent.
+ class NewCapLast : public Base {
+ virtual void prepare() {
+ insert( nsd()->capExtent, 0 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 1 );
+ }
+ virtual int count() const {
+ return 2;
+ }
+ };
+
+ // capFirstNewRecord points at the middle of three records in the cap extent.
+ class NewCapMiddle : public Base {
+ virtual void prepare() {
+ insert( nsd()->capExtent, 0 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 1 );
+ insert( nsd()->capExtent, 2 );
+ }
+ virtual int count() const {
+ return 3;
+ }
+ };
+
+ // Cap extent is the first of two extents; records are interleaved between
+ // the cap extent and the last extent, and the scan must still return them
+ // in insertion order 0..3.
+ class FirstExtent : public Base {
+ virtual void prepare() {
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->lastExtent, 1 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 2;
+ }
+ };
+
+ // Mirror of FirstExtent: the cap extent is moved to the last extent before
+ // inserting.
+ class LastExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->lastExtent;
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->firstExtent, 1 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 2;
+ }
+ };
+
+ // Cap extent in the middle of three extents, records spread across all
+ // three; scan order must still be 0..4.
+ class MidExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->lastExtent, 1 );
+ insert( nsd()->firstExtent, 2 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 3 );
+ insert( nsd()->capExtent, 4 );
+ }
+ virtual int count() const {
+ return 5;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ // The cap extent (middle of three) holds only the single "new" record.
+ class AloneInExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->lastExtent, 0 );
+ insert( nsd()->firstExtent, 1 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
+ }
+ virtual int count() const {
+ return 3;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ // capFirstNewRecord is the first record in the (middle) cap extent, with one
+ // more record after it.
+ class FirstInExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->lastExtent, 0 );
+ insert( nsd()->firstExtent, 1 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ // capFirstNewRecord is the last record in the (middle) cap extent, with one
+ // record before it.
+ class LastInExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->lastExtent, 1 );
+ insert( nsd()->firstExtent, 2 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ } // namespace ScanCapped
+
+ namespace Insert {
+ // Fixture for insert tests: holds the write lock and a Client::Context for
+ // the test namespace, and drops the collection on destruction.
+ class Base {
+ public:
+ Base() : _context( ns() ) {
+ }
+ virtual ~Base() {
+ if ( !nsd() )
+ return;
+ string n( ns() );
+ dropNS( n );
+ }
+ protected:
+ static const char *ns() {
+ return "unittests.pdfiletests.Insert";
+ }
+ static NamespaceDetails *nsd() {
+ return nsdetails( ns() );
+ }
+ private:
+ // Lock must be declared before the context so it is acquired first.
+ dblock lk_;
+ Client::Context _context;
+ };
+
+ // A zero timestamp field is filled in with the current time on insert;
+ // insertWithObjMod mutates the caller's object so the change is observable.
+ class UpdateDate : public Base {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendTimestamp( "a" );
+ BSONObj o = b.done();
+ ASSERT( 0 == o.getField( "a" ).date() );
+ theDataFileMgr.insertWithObjMod( ns(), o );
+ ASSERT( 0 != o.getField( "a" ).date() );
+ }
+ };
+ } // namespace Insert
+
+ // Regardless of the initial object size, repeated followupSize() calls must
+ // converge on the maximum extent size within 100 iterations.
+ class ExtentSizing {
+ public:
+ // RAII guard that forces cmdLine.smallfiles off for the duration of the
+ // test and restores the previous setting afterwards.
+ struct SmallFilesControl {
+ SmallFilesControl() {
+ old = cmdLine.smallfiles;
+ cmdLine.smallfiles = false;
+ }
+ ~SmallFilesControl() {
+ cmdLine.smallfiles = old;
+ }
+ bool old;
+ };
+ void run() {
+ SmallFilesControl c;
+ // test that no matter what we start with, we always get to max extent size
+ for ( int obj=16; obj<BSONObjMaxUserSize; obj += 111 ) {
+ int sz = Extent::initialSize( obj );
+ for ( int i=0; i<100; i++ ) {
+ sz = Extent::followupSize( obj , sz );
+ }
+ ASSERT_EQUALS( Extent::maxSize() , sz );
+ }
+ }
+ };
+
+ // Grows a scratch database until file sizes stop increasing (capped at 5
+ // files on 32-bit builds), then allocates one extent per file's unused space
+ // and checks that no additional data file had to be created.
+ class ExtentAllocOrder {
+ public:
+ void run() {
+ string dbname = "unittest_ex";
+
+ string c1 = dbname + ".x1";
+ string c2 = dbname + ".x2";
+
+ // Start from a clean slate.
+ {
+ DBDirectClient db;
+ db.dropDatabase( dbname );
+ }
+
+ dblock mylock;
+ Client::Context cx( dbname );
+
+ bool isnew;
+ Database * d = dbHolderW().getOrCreate( dbname , dbpath , isnew );
+ assert( d );
+
+ int big = 10 * 1024;
+ //int small = 1024;
+
+ // Add files until their size plateaus; limit to 5 on 32-bit to stay
+ // within address space.
+ unsigned long long l = 0;
+ int n = 0;
+ while ( 1 ) {
+ n++;
+ if( n == 5 && sizeof(void*)==4 )
+ break;
+ MongoDataFile * f = d->addAFile( big , false );
+ //cout << f->length() << ' ' << n << endl;
+ if ( f->length() == l )
+ break;
+ l = f->length();
+ }
+
+ // Filling each existing file's unused space should not grow the file count.
+ int start = d->numFiles();
+ for ( int i=0; i<start; i++ )
+ d->allocExtent( c1.c_str() , d->getFile( i )->getHeader()->unusedLength , false, false );
+ ASSERT_EQUALS( start , d->numFiles() );
+
+ // Clean up the scratch database.
+ {
+ DBDirectClient db;
+ db.dropDatabase( dbname );
+ }
+ }
+ };
+
+
+ // Registers all pdfile tests under the "pdfile" suite.
+ class All : public Suite {
+ public:
+ All() : Suite( "pdfile" ) {}
+
+ void setupTests() {
+ add< ScanCapped::Empty >();
+ add< ScanCapped::EmptyLooped >();
+ add< ScanCapped::EmptyMultiExtentLooped >();
+ add< ScanCapped::Single >();
+ add< ScanCapped::NewCapFirst >();
+ add< ScanCapped::NewCapLast >();
+ add< ScanCapped::NewCapMiddle >();
+ add< ScanCapped::FirstExtent >();
+ add< ScanCapped::LastExtent >();
+ add< ScanCapped::MidExtent >();
+ add< ScanCapped::AloneInExtent >();
+ add< ScanCapped::FirstInExtent >();
+ add< ScanCapped::LastInExtent >();
+ add< Insert::UpdateDate >();
+ add< ExtentSizing >();
+ add< ExtentAllocOrder >();
+ }
+ } myall;
+
+} // namespace PdfileTests
+
diff --git a/src/mongo/dbtests/perf/btreeperf.cpp b/src/mongo/dbtests/perf/btreeperf.cpp
new file mode 100644
index 00000000000..7d68d8f5cc7
--- /dev/null
+++ b/src/mongo/dbtests/perf/btreeperf.cpp
@@ -0,0 +1,442 @@
+// btreeperf.cpp
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Performance timing and space utilization testing for btree indexes.
+ */
+
+#include <iostream>
+
+#include <boost/random/bernoulli_distribution.hpp>
+#include <boost/random/geometric_distribution.hpp>
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/variate_generator.hpp>
+#include <boost/random/uniform_int.hpp>
+
+#include "client/dbclient.h"
+#include "../../util/timer.h"
+
+using namespace std;
+using namespace mongo;
+using namespace boost;
+
+const char *ns = "test.btreeperf";
+const char *db = "test";
+const char *index_collection = "btreeperf.$_id_";
+
+// This random number generator has a much larger period than the default
+// generator and is half as fast as the default. Given that we intend to
+// generate large numbers of documents and will utilize more than one random
+// sample per document, choosing this generator seems like a worthwhile tradeoff.
+mt19937 randomNumberGenerator;
+
+/**
+ * An interface for generating documents to be inserted and document specs for
+ * remove requests.
+ */
+class InsertAndRemoveStrategy {
+public:
+    virtual ~InsertAndRemoveStrategy() {}
+    // Next document to insert.
+    virtual BSONObj insertObj() = 0;
+    // Query spec identifying the next document to remove.
+    virtual BSONObj removeObj() = 0;
+protected:
+    /**
+     * Helper functions for converting a sample value to a sample object with
+     * specified _id, to be inserted or removed.
+     */
+
+    template< class T >
+    BSONObj insertObjWithVal( const T &val ) {
+        BSONObjBuilder b;
+        b.append( "_id", val );
+        return b.obj();
+    }
+    // Exact-match removal spec: { _id: val }.
+    template< class T >
+    BSONObj removeObjWithVal( const T &val ) {
+        BSONObjBuilder b;
+        b.append( "_id", val );
+        return b.obj();
+    }
+};
+
+/**
+ * Manages a set of elements of type T. Supports inserting unique elements and
+ * sampling a random element without replacement.
+ *
+ * TODO In the contexts where this class is currently used, duplicate keys are
+ * either impossible or highly unlikely. And an occasional duplicate value will
+ * not much affect the procedure by which a random element is chosen. We could
+ * stop checking for duplicates in push(), eliminate _set from the implementation,
+ * and potentially improve performance and memory requirements somewhat.
+ */
+template< class T >
+class SetSampler {
+public:
+    /** @param val Insert this value in the set if not already present. */
+    void push( const T& val ) {
+        if ( _set.insert( val ).second ) {
+            _vector.push_back( val );
+        }
+    }
+    /** @return a random element removed from the set; a default-constructed
+     *  T if the set is empty. */
+    T pull() {
+        if ( _vector.size() == 0 ) {
+            return T();
+        }
+        uniform_int< size_t > sizeRange( 0, _vector.size() - 1 );
+        variate_generator< mt19937&, uniform_int< size_t > > sizeGenerator( randomNumberGenerator, sizeRange );
+        size_t toRemove = sizeGenerator();
+        T val = _vector[ toRemove ];
+        // Replace the random element with the last element, then remove the
+        // last element.  O(1) removal without preserving order.
+        _vector[ toRemove ] = _vector.back();
+        _vector.pop_back();
+        _set.erase( val );
+        return val;
+    }
+private:
+    vector< T > _vector;   // supports O(1) random access for sampling
+    set< T > _set;         // enforces uniqueness of pushed values
+};
+
+/**
+ * Tracks values that have been specified for insertion by the derived class's
+ * implementation of insertVal() and selects uniformly from among values that
+ * have been inserted but not yet removed for the next value to remove.
+ *
+ * The implementation is probabilistically sound, but may be resource intensive
+ * and slow due to the use of a SetSampler.
+ */
+template< class T >
+class InsertAndUniformRemoveStrategy : public InsertAndRemoveStrategy {
+public:
+    virtual BSONObj insertObj() {
+        T val = insertVal();
+        // Track the value so it may later be chosen uniformly for removal.
+        _sampler.push( val );
+        return insertObjWithVal( val );
+    }
+    virtual BSONObj removeObj() { return removeObjWithVal( _sampler.pull() ); }
+protected:
+    /** @return value to insert. This is the only function a derived class need implement. */
+    virtual T insertVal() = 0;
+private:
+    SetSampler< T > _sampler;
+};
+
+/**
+ * The derived class supplies keys to be inserted and removed. The key removal
+ * strategy is similar to the strategy for selecting a random element described
+ * in the MongoDB cookbook: the first key in the collection greater than or
+ * equal to the supplied removal key is removed. This allows selecting an
+ * existing key for removal without the overhead required by a SetSampler.
+ *
+ * While this ranged selection strategy can work well for selecting a random
+ * element, there are some theoretical and empirically observed shortcomings
+ * when the strategy is applied to removing nodes for btree performance measurement:
+ * 1 The likelihood that a given key is removed is proportional to the difference
+ * in value between it and the previous key. Because key deletion increases
+ * the difference in value between adjacent keys, neighboring keys will be
+ * more likely to be deleted than they would be in a true uniform distribution.
+ * 2 MongoDB 1.6 uses 'unused' nodes in the btree implementation. With a ranged
+ * removal strategy, those nodes must be traversed to find a node available
+ * for removal.
+ * 3 Ranged removal was observed to be biased against the balancing policy of
+ * MongoDB 1.7 in some cases, in terms of storage size. This may be a
+ * consequence of point 1 above.
+ * 4 Ranged removal was observed to be significantly biased against the btree
+ * implementation in MongoDB 1.6 in terms of performance. This is likely a
+ * consequence of point 2 above.
+ * 5 In some cases the biases described above were not evident in tests lasting
+ * several minutes, but were evident in tests lasting several hours.
+ */
+template< class T >
+class InsertAndRangedRemoveStrategy : public InsertAndRemoveStrategy {
+public:
+    virtual BSONObj insertObj() { return insertObjWithVal( insertVal() ); }
+    virtual BSONObj removeObj() { return rangedRemoveObjWithVal( removeVal() ); }
+protected:
+    /** Small likelihood that this removal spec will not match any document */
+    // Builds { _id: { $gte: val } } so the first key >= val is removed.
+    template< class U >
+    BSONObj rangedRemoveObjWithVal( const U &val ) {
+        BSONObjBuilder b1;
+        BSONObjBuilder b2( b1.subobjStart( "_id" ) );
+        b2.append( "$gte", val );
+        b2.done();
+        return b1.obj();
+    }
+    virtual T insertVal() = 0;
+    virtual T removeVal() = 0;
+};
+
+/**
+ * Integer Keys
+ * Uniform Inserts
+ * Uniform Removes
+ */
+class UniformInsertRangedUniformRemoveInteger : public InsertAndRangedRemoveStrategy< long long > {
+public:
+    UniformInsertRangedUniformRemoveInteger() :
+        // Full unsigned 64-bit range; values are returned as long long, so
+        // roughly half come out negative after conversion — presumably fine
+        // for key-distribution purposes, but TODO confirm intent.
+        _uniform_int( 0ULL, ~0ULL ),
+        _nextLongLong( randomNumberGenerator, _uniform_int ) {
+    }
+    /** Small likelihood of duplicates */
+    virtual long long insertVal() { return _nextLongLong(); }
+    virtual long long removeVal() { return _nextLongLong(); }
+private:
+    uniform_int< unsigned long long > _uniform_int;
+    variate_generator< mt19937&, uniform_int< unsigned long long > > _nextLongLong;
+};
+
+// Same key distribution as the ranged variant, but removal is truly uniform
+// (via the SetSampler in the base class).
+class UniformInsertUniformRemoveInteger : public InsertAndUniformRemoveStrategy< long long > {
+public:
+    virtual long long insertVal() { return _gen.insertVal(); }
+private:
+    UniformInsertRangedUniformRemoveInteger _gen;   // reused as value source only
+};
+
+/**
+ * String Keys
+ * Uniform Inserts
+ * Uniform Removes
+ */
+class UniformInsertRangedUniformRemoveString : public InsertAndRangedRemoveStrategy< string > {
+public:
+    UniformInsertRangedUniformRemoveString() :
+        // Geometrically distributed string lengths: short keys are common,
+        // long keys increasingly rare.
+        _geometric_distribution( 0.9 ),
+        _nextLength( randomNumberGenerator, _geometric_distribution ),
+        _uniform_char( 'a', 'z' ),
+        _nextChar( randomNumberGenerator, _uniform_char ) {
+    }
+    /** Small likelihood of duplicates */
+    virtual string insertVal() { return nextString(); }
+    virtual string removeVal() { return nextString(); }
+private:
+    // Random lowercase string, length in [5, 100].
+    string nextString() {
+        // The longer the minimum string length, the lower the likelihood of duplicates
+        int len = _nextLength() + 5;
+        len = len > 100 ? 100 : len;
+        string ret( len, 'x' );
+        for( int i = 0; i < len; ++i ) {
+            ret[ i ] = _nextChar();
+        }
+        return ret;
+    }
+    geometric_distribution<> _geometric_distribution;
+    variate_generator< mt19937&, geometric_distribution<> > _nextLength;
+    uniform_int< char > _uniform_char;
+    variate_generator< mt19937&, uniform_int< char > > _nextChar;
+};
+
+// String-keyed variant with truly uniform removal (SetSampler-backed).
+class UniformInsertUniformRemoveString : public InsertAndUniformRemoveStrategy< string > {
+public:
+    virtual string insertVal() { return _gen.insertVal(); }
+private:
+    UniformInsertRangedUniformRemoveString _gen;   // reused as value source only
+};
+
+/**
+ * OID Keys
+ * Increasing Inserts
+ * Uniform Removes
+ */
+class IncreasingInsertRangedUniformRemoveOID : public InsertAndRangedRemoveStrategy< OID > {
+public:
+    IncreasingInsertRangedUniformRemoveOID() :
+        _max( -1 ) {   // first insert uses ++_max == 0
+    }
+    virtual OID insertVal() { return oidFromULL( ++_max ); }
+    virtual OID removeVal() {
+        // Uniform over the counter range inserted so far.
+        uniform_int< unsigned long long > distribution( 0, _max > 0 ? _max : 0 );
+        variate_generator< mt19937&, uniform_int< unsigned long long > > generator( randomNumberGenerator, distribution );
+        return oidFromULL( generator() );
+    }
+private:
+    // Encode the counter into the trailing 8 bytes of a 12-byte OID.
+    // NOTE(review): __builtin_bswap64 is gcc/clang-specific (presumably to get
+    // big-endian byte order so OID ordering matches numeric ordering — confirm).
+    static OID oidFromULL( unsigned long long val ) {
+        val = __builtin_bswap64( val );
+        OID oid;
+        oid.clear();
+        memcpy( (char*)&oid + 4, &val, 8 );
+        return oid;
+    }
+    long long _max;   // highest counter value inserted so far
+};
+
+// OID-keyed variant with truly uniform removal (SetSampler-backed).
+class IncreasingInsertUniformRemoveOID : public InsertAndUniformRemoveStrategy< OID > {
+public:
+    virtual OID insertVal() { return _gen.insertVal(); }
+private:
+    IncreasingInsertRangedUniformRemoveOID _gen;   // reused as value source only
+};
+
+/**
+ * Integer Keys
+ * Increasing Inserts
+ * Increasing Removes (on remove, the lowest key is always removed)
+ */
+class IncreasingInsertIncreasingRemoveInteger : public InsertAndRemoveStrategy {
+public:
+    IncreasingInsertIncreasingRemoveInteger() :
+        // Start with a large value so data type will be preserved if we round
+        // trip through json.
+        _min( 1LL << 32 ),
+        _max( 1LL << 32 ) {
+    }
+    virtual BSONObj insertObj() { return insertObjWithVal( ++_max ); }
+    // Remove the lowest live key; when the window is empty (_min == _max),
+    // repeat the last key rather than advancing past _max.
+    virtual BSONObj removeObj() { return removeObjWithVal( _min < _max ? ++_min : _min ); }
+private:
+    long long _min;   // lowest key not yet removed
+    long long _max;   // highest key inserted
+};
+
+/** Generate a random boolean value. */
+class BernoulliGenerator {
+public:
+ /**
+ * @param excessFalsePercent This specifies the desired rate of false values
+ * vs true values. If we want false to be 5% more likely than true, we
+ * specify 5 for this argument.
+ */
+ BernoulliGenerator( int excessFalsePercent ) :
+ _bernoulli_distribution( 1.0 / ( 2.0 + excessFalsePercent / 100.0 ) ),
+ _generator( randomNumberGenerator, _bernoulli_distribution ) {
+ }
+ bool operator()() { return _generator(); }
+private:
+ bernoulli_distribution<> _bernoulli_distribution;
+ variate_generator< mt19937&, bernoulli_distribution<> > _generator;
+};
+
+/** Runs a strategy on a connection, with specified mix of inserts and removes. */
+class InsertAndRemoveRunner {
+public:
+ InsertAndRemoveRunner( DBClientConnection &conn, InsertAndRemoveStrategy &strategy, int excessInsertPercent ) :
+ _conn( conn ),
+ _strategy( strategy ),
+ _nextOpTypeRemove( excessInsertPercent ) {
+ }
+ void writeOne() {
+ if ( _nextOpTypeRemove() ) {
+ _conn.remove( ns, _strategy.removeObj(), true );
+ }
+ else {
+ _conn.insert( ns, _strategy.insertObj() );
+ }
+ }
+private:
+ DBClientConnection &_conn;
+ InsertAndRemoveStrategy &_strategy;
+ BernoulliGenerator _nextOpTypeRemove;
+};
+
+/**
+ * Writes a test script to cout based on a strategy and specified mix of inserts
+ * and removes. The script can be subsequently executed by InsertAndRemoveRunner.
+ * Script generation is intended for strategies that are memory or cpu intensive
+ * and might either divert resources from a mongod instance being analyzed on the
+ * same machine or fail to generate requests as quickly as the mongod might
+ * accept them.
+ * The script contains one line per operation. Each line begins
+ * with a letter indicating the operation type, followed by a space. Next
+ * follows the json representation of a document for the specified operation
+ * type.
+ */
+class InsertAndRemoveScriptGenerator {
+public:
+    InsertAndRemoveScriptGenerator( InsertAndRemoveStrategy &strategy, int excessInsertPercent ) :
+        _strategy( strategy ),
+        _nextOpTypeRemove( excessInsertPercent ) {
+    }
+    // Emit one script line: "r <json>" for a remove, "i <json>" for an insert.
+    void writeOne() {
+        if ( _nextOpTypeRemove() ) {
+            cout << "r " << _strategy.removeObj().jsonString() << endl;
+        }
+        else {
+            cout << "i " << _strategy.insertObj().jsonString() << endl;
+        }
+    }
+private:
+    InsertAndRemoveStrategy &_strategy;
+    BernoulliGenerator _nextOpTypeRemove;   // true => remove, false => insert
+};
+
+/**
+ * Run a test script from cin that was generated by
+ * InsertAndRemoveScriptGenerator. Running the script is intended to be
+ * lightweight in terms of memory and cpu usage, and fast.
+ */
+class InsertAndRemoveScriptRunner {
+public:
+    InsertAndRemoveScriptRunner( DBClientConnection &conn ) :
+        _conn( conn ) {
+    }
+    /**
+     * Read one script line ("i <json>" or "r <json>") from cin and replay it
+     * against the connection.  A line that cannot be read (end of script,
+     * stream error, or an overlong line) is ignored; previously the stale
+     * buffer contents would be parsed and replayed again on every call.
+     */
+    void writeOne() {
+        if ( !cin.getline( _buf, 1024 ) || _buf[ 0 ] == '\0' ) {
+            return;   // end of script or bad read; nothing to replay
+        }
+        // Skip the leading op-type letter and space before the json payload.
+        BSONObj val = fromjson( _buf + 2 );
+        if ( _buf[ 0 ] == 'r' ) {
+            // justOne == true: remove at most one matching document.
+            _conn.remove( ns, val, true );
+        }
+        else {
+            _conn.insert( ns, val );
+        }
+    }
+private:
+    DBClientConnection &_conn;
+    char _buf[ 1024 ];   // one script line; payload starts at _buf + 2
+};
+
+int main( int argc, const char **argv ) {
+
+    DBClientConnection conn;
+    conn.connect( "127.0.0.1:27017" );
+    conn.dropCollection( ns );
+
+    // Select the workload by uncommenting exactly one strategy (and the
+    // generator line below it to produce a script instead of running one).
+//    UniformInsertRangedUniformRemoveInteger strategy;
+//    UniformInsertUniformRemoveInteger strategy;
+//    UniformInsertRangedUniformRemoveString strategy;
+//    UniformInsertUniformRemoveString strategy;
+//    IncreasingInsertRangedUniformRemoveOID strategy;
+//    IncreasingInsertUniformRemoveOID strategy;
+//    IncreasingInsertIncreasingRemoveInteger strategy;
+//    InsertAndRemoveScriptGenerator runner( strategy, 5 );
+    InsertAndRemoveScriptRunner runner( conn );
+
+    Timer t;
+    BSONObj statsCmd = BSON( "collstats" << index_collection );
+
+    // Print header, unless we are generating a script (in that case, comment this out).
+    cout << "ops,milliseconds,docs,totalBucketSize" << endl;
+
+    long long i = 0;
+    long long n = 10000000000;   // effectively run until interrupted
+    while( i < n ) {
+        runner.writeOne();
+        // Print statistics, unless we are generating a script (in that case, comment this out).
+        // The stats collection requests below provide regular read operations,
+        // ensuring we are caught up with the progress being made by the mongod
+        // under analysis.
+        if ( ++i % 50000 == 0 ) {
+            // The total number of documents present.
+            long long docs = conn.count( ns );
+            BSONObj result;
+            conn.runCommand( db, statsCmd, result );
+            // The total number of bytes used for all allocated 8K buckets of the
+            // btree.
+            long long totalBucketSize = result.getField( "count" ).numberLong() * 8192;
+            cout << i << ',' << t.millis() << ',' << docs << ',' << totalBucketSize << endl;
+        }
+    }
+}
diff --git a/src/mongo/dbtests/perf/perftest.cpp b/src/mongo/dbtests/perf/perftest.cpp
new file mode 100644
index 00000000000..b6219f7f5d9
--- /dev/null
+++ b/src/mongo/dbtests/perf/perftest.cpp
@@ -0,0 +1,761 @@
+// perftest.cpp : Run db performance tests.
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "../../client/dbclient.h"
+#include "../../db/instance.h"
+#include "../../db/ops/query.h"
+#include "../../db/queryoptimizer.h"
+#include "../../util/file_allocator.h"
+
+#include "../framework.h"
+#include <boost/date_time/posix_time/posix_time.hpp>
+
+namespace mongo {
+ extern string dbpath;
+} // namespace mongo
+
+
+using namespace mongo;
+using namespace mongo::regression;
+
+DBClientBase *client_;
+
+// Each test runs with a separate db, so no test does any of the startup
+// (ie allocation) work for another test.
+template< class T >
+string testDb( T *t = 0 ) {
+    // Derive a per-test database name from the (demangled) test class name.
+    string name = mongo::demangleName( typeid( T ) );
+    // Make filesystem safe (namespace separators are not valid in db names).
+    for( string::iterator i = name.begin(); i != name.end(); ++i )
+        if ( *i == ':' )
+            *i = '_';
+    return name;
+}
+
+// Full namespace ("<testdb>.perftest") for the given test's collection.
+template< class T >
+string testNs( T *t ) {
+    stringstream ss;
+    ss << testDb( t ) << ".perftest";
+    return ss.str();
+}
+
+// Times a single test's run() (setup happens in T's constructor, untimed)
+// and prints the elapsed seconds as a one-line python-style dict.
+template <class T>
+class Runner {
+public:
+    void run() {
+        T test;
+        string name = testDb( &test );
+        boost::posix_time::ptime start = boost::posix_time::microsec_clock::universal_time();
+        test.run();
+        boost::posix_time::ptime end = boost::posix_time::microsec_clock::universal_time();
+        long long micro = ( end - start ).total_microseconds();
+        // e.g. {'Name': 1.234567}
+        cout << "{'" << name << "': "
+             << micro / 1000000
+             << "."
+             << setw( 6 ) << setfill( '0' ) << micro % 1000000
+             << "}" << endl;
+    }
+    ~Runner() {
+        // Let background file allocation settle before dropping the test db.
+        FileAllocator::get()->waitUntilFinished();
+        client_->dropDatabase( testDb< T >().c_str() );
+    }
+};
+
+// Suite whose add<T>() wraps each test in a timing Runner.
+class RunnerSuite : public Suite {
+public:
+    RunnerSuite( string name ) : Suite( name ) {}
+protected:
+    template< class T >
+    void add() {
+        Suite::add< Runner< T > >();
+    }
+};
+
+// Insert throughput under various index configurations.
+namespace Insert {
+    // Baseline: inserts with only the implicit _id index.
+    class IdIndex {
+    public:
+        void run() {
+            string ns = testNs( this );
+            for( int i = 0; i < 100000; ++i ) {
+                client_->insert( ns.c_str(), BSON( "_id" << i ) );
+            }
+        }
+    };
+
+    // Inserts with one extra index on _id (two indexes total).
+    class TwoIndex {
+    public:
+        TwoIndex() : ns_( testNs( this ) ) {
+            client_->ensureIndex( ns_, BSON( "_id" << 1 ), "my_id" );
+        }
+        void run() {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+        }
+        string ns_;
+    };
+
+    // Inserts with many indexes: nine extra _id indexes, each with a distinct
+    // name taken from successive suffixes of "aaaaaaaaa".
+    class TenIndex {
+    public:
+        TenIndex() : ns_( testNs( this ) ) {
+            const char *names = "aaaaaaaaa";
+            for( int i = 0; i < 9; ++i ) {
+                // resetIndexCache so ensureIndex issues a real create each time.
+                client_->resetIndexCache();
+                client_->ensureIndex( ns_.c_str(), BSON( "_id" << 1 ), false, names + i );
+            }
+        }
+        void run() {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+        }
+        string ns_;
+    };
+
+    // Inserts into a capped collection.
+    class Capped {
+    public:
+        Capped() : ns_( testNs( this ) ) {
+            client_->createCollection( ns_.c_str(), 100000, true );
+        }
+        void run() {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+        }
+        string ns_;
+    };
+
+    // Inserts with descending keys (worst case for right-leaning btree growth).
+    class OneIndexReverse {
+    public:
+        OneIndexReverse() : ns_( testNs( this ) ) {
+            client_->ensureIndex( ns_, BSON( "_id" << 1 ) );
+        }
+        void run() {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << ( 100000 - 1 - i ) ) );
+        }
+        string ns_;
+    };
+
+    // Inserts alternating above/below the median key.
+    class OneIndexHighLow {
+    public:
+        OneIndexHighLow() : ns_( testNs( this ) ) {
+            client_->ensureIndex( ns_, BSON( "_id" << 1 ) );
+        }
+        void run() {
+            for( int i = 0; i < 100000; ++i ) {
+                int j = 50000 + ( ( i % 2 == 0 ) ? 1 : -1 ) * ( i / 2 + 1 );
+                client_->insert( ns_.c_str(), BSON( "_id" << j ) );
+            }
+        }
+        string ns_;
+    };
+
+    class All : public RunnerSuite {
+    public:
+        All() : RunnerSuite( "insert" ) {}
+
+        void setupTests() {
+            add< IdIndex >();
+            add< TwoIndex >();
+            add< TenIndex >();
+            add< Capped >();
+            add< OneIndexReverse >();
+            add< OneIndexHighLow >();
+        }
+    } all;
+} // namespace Insert
+
+// Update throughput for shrinking, growing, and modifier-based updates.
+namespace Update {
+    // Full-document replace that shrinks each document (drops field "b").
+    class Smaller {
+    public:
+        Smaller() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i << "b" << 2 ) );
+        }
+        void run() {
+            for( int i = 0; i < 100000; ++i )
+                client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "_id" << i ) );
+        }
+        string ns_;
+    };
+
+    // Full-document replace that grows each document (adds field "b"),
+    // potentially forcing document moves.
+    class Bigger {
+    public:
+        Bigger() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+        }
+        void run() {
+            for( int i = 0; i < 100000; ++i )
+                client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "_id" << i << "b" << 2 ) );
+        }
+        string ns_;
+    };
+
+    // $inc modifier: in-place numeric update, 10 passes over 10k docs.
+    class Inc {
+    public:
+        Inc() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 10000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i << "i" << 0 ) );
+        }
+        void run() {
+            for( int j = 0; j < 10; ++j )
+                for( int i = 0; i < 10000; ++i )
+                    client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "$inc" << BSON( "i" << 1 ) ) );
+        }
+        string ns_;
+    };
+
+    // $set modifier with a same-size value: in-place update.
+    class Set {
+    public:
+        Set() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 10000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i << "i" << 0 ) );
+        }
+        void run() {
+            for( int j = 1; j < 11; ++j )
+                for( int i = 0; i < 10000; ++i )
+                    client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "$set" << BSON( "i" << j ) ) );
+        }
+        string ns_;
+    };
+
+    // $set with progressively longer string values, growing the documents.
+    class SetGrow {
+    public:
+        SetGrow() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 10000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i << "i" << "" ) );
+        }
+        void run() {
+            // j counts down so the suffix "aaaaaaaaaa"[j] lengthens each pass.
+            for( int j = 9; j > -1; --j )
+                for( int i = 0; i < 10000; ++i )
+                    client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "$set" << BSON( "i" << "aaaaaaaaaa"[j] ) ) );
+        }
+        string ns_;
+    };
+
+    class All : public RunnerSuite {
+    public:
+        All() : RunnerSuite( "update" ) {}
+        void setupTests() {
+            add< Smaller >();
+            add< Bigger >();
+            add< Inc >();
+            add< Set >();
+            add< SetGrow >();
+        }
+    } all;
+} // namespace Update
+
+// JSON parse / serialize and BSONObj copy performance.
+namespace BSON {
+
+    // Small synthetic document exercising many BSON types.
+    const char *sample =
+        "{\"one\":2, \"two\":5, \"three\": {},"
+        "\"four\": { \"five\": { \"six\" : 11 } },"
+        "\"seven\": [ \"a\", \"bb\", \"ccc\", 5 ],"
+        "\"eight\": Dbref( \"rrr\", \"01234567890123456789aaaa\" ),"
+        "\"_id\": ObjectId( \"deadbeefdeadbeefdeadbeef\" ),"
+        "\"nine\": { \"$binary\": \"abc=\", \"$type\": \"02\" },"
+        "\"ten\": Date( 44 ), \"eleven\": /foooooo/i }";
+
+    // Larger realistic document captured from a production-style workload.
+    const char *shopwikiSample =
+        "{ '_id' : '289780-80f85380b5c1d4a0ad75d1217673a4a2' , 'site_id' : 289780 , 'title'"
+        ": 'Jubilee - Margaret Walker' , 'image_url' : 'http://www.heartlanddigsandfinds.c"
+        "om/store/graphics/Product_Graphics/Product_8679.jpg' , 'url' : 'http://www.heartla"
+        "nddigsandfinds.com/store/store_product_detail.cfm?Product_ID=8679&Category_ID=2&Su"
+        "b_Category_ID=910' , 'url_hash' : 3450626119933116345 , 'last_update' :  null  , '"
+        "features' : { '$imagePrefetchDate' : '2008Aug30 22:39' , '$image.color.rgb' : '5a7"
+        "574' , 'Price' : '$10.99' , 'Description' : 'Author--s 1st Novel. A Houghton Miffl"
+        "in Literary Fellowship Award novel by the esteemed poet and novelist who has demon"
+        "strated a lifelong commitment to the heritage of black culture. An acclaimed story"
+        "of Vyry, a negro slave during the 19th Century, facing the biggest challenge of h"
+        "er lifetime - that of gaining her freedom, fighting for all the things she had nev"
+        "er known before. The author, great-granddaughter of Vyry, reveals what the Civil W"
+        "ar in America meant to the Negroes. Slavery W' , '$priceHistory-1' : '2008Dec03 $1"
+        "0.99' , 'Brand' : 'Walker' , '$brands_in_title' : 'Walker' , '--path' : '//HTML[1]"
+        "/BODY[1]/TABLE[1]/TR[1]/TD[1]/P[1]/TABLE[1]/TR[1]/TD[1]/TABLE[1]/TR[2]/TD[2]/TABLE"
+        "[1]/TR[1]/TD[1]/P[1]/TABLE[1]/TR[1]' , '~location' : 'en_US' , '$crawled' : '2009J"
+        "an11 03:22' , '$priceHistory-2' : '2008Nov15 $10.99' , '$priceHistory-0' : '2008De"
+        "c24 $10.99'}}";
+
+    // json -> BSON parse speed on the small sample.
+    class Parse {
+    public:
+        void run() {
+            for( int i = 0; i < 10000; ++i )
+                fromjson( sample );
+        }
+    };
+
+    // json -> BSON parse speed on the large sample.
+    class ShopwikiParse {
+    public:
+        void run() {
+            for( int i = 0; i < 10000; ++i )
+                fromjson( shopwikiSample );
+        }
+    };
+
+    // BSON -> json serialization speed on the small sample.
+    class Json {
+    public:
+        Json() : o_( fromjson( sample ) ) {}
+        void run() {
+            for( int i = 0; i < 10000; ++i )
+                o_.jsonString();
+        }
+        BSONObj o_;
+    };
+
+    // BSON -> json serialization speed on the large sample.
+    class ShopwikiJson {
+    public:
+        ShopwikiJson() : o_( fromjson( shopwikiSample ) ) {}
+        void run() {
+            for( int i = 0; i < 10000; ++i )
+                o_.jsonString();
+        }
+        BSONObj o_;
+    };
+
+    // getOwned() copy cost for objects of roughly LEN bytes.
+    template <int LEN>
+    class Copy {
+    public:
+        Copy(){
+            // putting it in a subobject to force copy on getOwned
+            BSONObjBuilder outer;
+            BSONObjBuilder b (outer.subobjStart("inner"));
+            while (b.len() < LEN)
+                b.append(BSONObjBuilder::numStr(b.len()), b.len());
+            b.done();
+            _base = outer.obj();
+        }
+
+        void run() {
+            int iterations = 1000*1000;
+            while (iterations--){
+                BSONObj temp = copy(_base.firstElement().embeddedObject().getOwned());
+            }
+        }
+
+    private:
+        // noinline should force copying even when optimized
+        NOINLINE_DECL BSONObj copy(BSONObj x){
+            return x;
+        }
+
+        BSONObj _base;
+    };
+
+
+
+    class All : public RunnerSuite {
+    public:
+        All() : RunnerSuite( "bson" ) {}
+        void setupTests() {
+            add< Parse >();
+            add< ShopwikiParse >();
+            add< Json >();
+            add< ShopwikiJson >();
+            add< Copy<10> >();
+            add< Copy<100> >();
+            add< Copy<1000> >();
+            add< Copy<10*1000> >();
+        }
+    } all;
+
+} // namespace BSON
+
+// Index build (foreground ensureIndex) speed for various key types.
+namespace Index {
+
+    // Build an index over 100k integer keys.
+    class Int {
+    public:
+        Int() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "a" << i ) );
+        }
+        void run() {
+            client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+        }
+        string ns_;
+    };
+
+    // Build an index over 100k ObjectId keys.
+    class ObjectId {
+    public:
+        ObjectId() : ns_( testNs( this ) ) {
+            OID id;
+            for( int i = 0; i < 100000; ++i ) {
+                id.init();
+                client_->insert( ns_.c_str(), BSON( "a" << id ) );
+            }
+        }
+        void run() {
+            client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+        }
+        string ns_;
+    };
+
+    // Build an index over 100k string keys.
+    class String {
+    public:
+        String() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 100000; ++i ) {
+                stringstream ss;
+                ss << i;
+                client_->insert( ns_.c_str(), BSON( "a" << ss.str() ) );
+            }
+        }
+        void run() {
+            client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+        }
+        string ns_;
+    };
+
+    // Build an index over 100k subdocument keys.
+    class Object {
+    public:
+        Object() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 100000; ++i ) {
+                client_->insert( ns_.c_str(), BSON( "a" << BSON( "a" << i ) ) );
+            }
+        }
+        void run() {
+            client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+        }
+        string ns_;
+    };
+
+    class All : public RunnerSuite {
+    public:
+        All() : RunnerSuite( "index" ) {}
+        void setupTests() {
+            add< Int >();
+            add< ObjectId >();
+            add< String >();
+            add< Object >();
+        }
+    } all;
+
+} // namespace Index
+
+// Query and cursor iteration performance.
+namespace QueryTests {
+
+    // Indexed point query that matches nothing (probe past the max key).
+    class NoMatch {
+    public:
+        NoMatch() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+        }
+        void run() {
+            client_->findOne( ns_.c_str(), QUERY( "_id" << 100000 ) );
+        }
+        string ns_;
+    };
+
+    // No-match query forced through the _id index via hint: every key must
+    // be scanned and matched against the unindexed predicate.
+    class NoMatchIndex {
+    public:
+        NoMatchIndex() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+        }
+        void run() {
+            client_->findOne( ns_.c_str(),
+                              QUERY( "a" << "b" ).hint( BSON( "_id" << 1 ) ) );
+        }
+        string ns_;
+    };
+
+    // No-match table scan over wide documents (10 fields each).
+    class NoMatchLong {
+    public:
+        NoMatchLong() : ns_( testNs( this ) ) {
+            const char *names = "aaaaaaaaaa";
+            for( int i = 0; i < 100000; ++i ) {
+                BSONObjBuilder b;
+                // Field names are the suffixes "aaaaaaaaaa", "aaaaaaaaa", ... "a".
+                for( int j = 0; j < 10; ++j )
+                    b << ( names + j ) << i;
+                client_->insert( ns_.c_str(), b.obj() );
+            }
+        }
+        void run() {
+            client_->findOne( ns_.c_str(), QUERY( "a" << 100000 ) );
+        }
+        string ns_;
+    };
+
+    // Sort on _id when insertion order already matches the sort order.
+    class SortOrdered {
+    public:
+        SortOrdered() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 50000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+        }
+        void run() {
+            auto_ptr< DBClientCursor > c =
+                client_->query( ns_.c_str(), Query( BSONObj() ).sort( BSON( "_id" << 1 ) ) );
+            int i = 0;
+            // Drain the cursor, counting documents.
+            for( ; c->more(); c->nextSafe(), ++i );
+            ASSERT_EQUALS( 50000, i );
+        }
+        string ns_;
+    };
+
+    // Sort on _id when insertion order is the reverse of the sort order.
+    class SortReverse {
+    public:
+        SortReverse() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 50000; ++i )
+                client_->insert( ns_.c_str(), BSON( "_id" << ( 50000 - 1 - i ) ) );
+        }
+        void run() {
+            auto_ptr< DBClientCursor > c =
+                client_->query( ns_.c_str(), Query( BSONObj() ).sort( BSON( "_id" << 1 ) ) );
+            int i = 0;
+            for( ; c->more(); c->nextSafe(), ++i );
+            ASSERT_EQUALS( 50000, i );
+        }
+        string ns_;
+    };
+
+    // getMore batching over an unindexed scan (cursor opened untimed in ctor).
+    class GetMore {
+    public:
+        GetMore() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "a" << i ) );
+            c_ = client_->query( ns_.c_str(), Query() );
+        }
+        void run() {
+            int i = 0;
+            for( ; c_->more(); c_->nextSafe(), ++i );
+            ASSERT_EQUALS( 100000, i );
+        }
+        string ns_;
+        auto_ptr< DBClientCursor > c_;
+    };
+
+    // getMore batching over an index range scan.
+    class GetMoreIndex {
+    public:
+        GetMoreIndex() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_.c_str(), BSON( "a" << i ) );
+            client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+            c_ = client_->query( ns_.c_str(), QUERY( "a" << GT << -1 ).hint( BSON( "a" << 1 ) ) );
+        }
+        void run() {
+            int i = 0;
+            for( ; c_->more(); c_->nextSafe(), ++i );
+            ASSERT_EQUALS( 100000, i );
+        }
+        string ns_;
+        auto_ptr< DBClientCursor > c_;
+    };
+
+    // Covered-key filtering: b == 0 matches 1 in 10 of 1M docs, so the
+    // compound index lets 90% of candidates be rejected from keys alone.
+    class GetMoreKeyMatchHelps {
+    public:
+        GetMoreKeyMatchHelps() : ns_( testNs( this ) ) {
+            for( int i = 0; i < 1000000; ++i )
+                client_->insert( ns_.c_str(), BSON( "a" << i << "b" << i % 10 << "c" << "d" ) );
+            client_->ensureIndex( ns_, BSON( "a" << 1 << "b" << 1 ) );
+            c_ = client_->query( ns_.c_str(), QUERY( "a" << GT << -1 << "b" << 0 ).hint( BSON( "a" << 1 << "b" << 1 ) ) );
+        }
+        void run() {
+            int i = 0;
+            for( ; c_->more(); c_->nextSafe(), ++i );
+            ASSERT_EQUALS( 100000, i );
+        }
+        string ns_;
+        auto_ptr< DBClientCursor > c_;
+    };
+
+    class All : public RunnerSuite {
+    public:
+        All() : RunnerSuite( "query" ) {}
+        void setupTests() {
+            add< NoMatch >();
+            add< NoMatchIndex >();
+            add< NoMatchLong >();
+            add< SortOrdered >();
+            add< SortReverse >();
+            add< GetMore >();
+            add< GetMoreIndex >();
+            add< GetMoreKeyMatchHelps >();
+        }
+    } all;
+
+} // namespace QueryTests
+
+// count() performance with and without an index on the counted field.
+namespace Count {
+
+    // Unindexed count: full scan with a matcher.
+    class Count {
+    public:
+        Count() : ns_( testNs( this ) ) {
+            BSONObj obj = BSON( "a" << 1 );
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_, obj );
+        }
+        void run() {
+            ASSERT_EQUALS( 100000U, client_->count( ns_, BSON( "a" << 1 ) ) );
+        }
+        string ns_;
+    };
+
+    // Indexed count on a numeric key.
+    class CountIndex {
+    public:
+        CountIndex() : ns_( testNs( this ) ) {
+            BSONObj obj = BSON( "a" << 1 );
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_, obj );
+            client_->ensureIndex( ns_, obj );
+        }
+        void run() {
+            // 'simple' match does not work for numbers
+            ASSERT_EQUALS( 100000U, client_->count( ns_, BSON( "a" << 1 ) ) );
+        }
+        string ns_;
+    };
+
+    // Indexed count on a string key, where the simple-match fast path applies.
+    class CountSimpleIndex {
+    public:
+        CountSimpleIndex() : ns_( testNs( this ) ) {
+            BSONObj obj = BSON( "a" << "b" );
+            for( int i = 0; i < 100000; ++i )
+                client_->insert( ns_, obj );
+            client_->ensureIndex( ns_, obj );
+        }
+        void run() {
+            ASSERT_EQUALS( 100000U, client_->count( ns_, BSON( "a" << "b" ) ) );
+        }
+        string ns_;
+    };
+
+    class All : public RunnerSuite {
+    public:
+        All() : RunnerSuite( "count" ) {}
+        void setupTests() {
+            add< Count >();
+            add< CountIndex >();
+            add< CountSimpleIndex >();
+        }
+    } all;
+
+} // namespace Count
+
+// Query plan enumeration (MultiPlanScanner construction) cost with many
+// candidate indexes.
+namespace Plan {
+
+    // Plan selection when the query carries a hint.
+    class Hint {
+    public:
+        Hint() : ns_( testNs( this ) ) {
+            const char *names = "aaaaaaaaa";
+            for( int i = 0; i < 9; ++i ) {
+                client_->resetIndexCache();
+                client_->ensureIndex( ns_.c_str(), BSON( ( names + i ) << 1 ), false, names + i );
+            }
+            lk_.reset( new dblock );
+            Client::Context ctx( ns_ );
+            hint_ = BSON( "hint" << BSON( "a" << 1 ) );
+            // hintElt_ points into hint_, which stays alive as a member.
+            hintElt_ = hint_.firstElement();
+        }
+        void run() {
+            for( int i = 0; i < 10000; ++i )
+                MultiPlanScanner s( ns_.c_str(), BSONObj(), BSONObj(), &hintElt_ );
+        }
+        string ns_;
+        auto_ptr< dblock > lk_;
+        BSONObj hint_;
+        BSONElement hintElt_;
+    };
+
+    // Plan selection driven by a sort spec.
+    class Sort {
+    public:
+        Sort() : ns_( testNs( this ) ) {
+            const char *names = "aaaaaaaaaa";
+            for( int i = 0; i < 10; ++i ) {
+                client_->resetIndexCache();
+                client_->ensureIndex( ns_.c_str(), BSON( ( names + i ) << 1 ), false, names + i );
+            }
+            lk_.reset( new dblock );
+        }
+        void run() {
+            Client::Context ctx( ns_ );
+            for( int i = 0; i < 10000; ++i )
+                MultiPlanScanner s( ns_.c_str(), BSONObj(), BSON( "a" << 1 ) );
+        }
+        string ns_;
+        auto_ptr< dblock > lk_;
+    };
+
+    // Plan selection driven by a query predicate.
+    class Query {
+    public:
+        Query() : ns_( testNs( this ) ) {
+            const char *names = "aaaaaaaaaa";
+            for( int i = 0; i < 10; ++i ) {
+                client_->resetIndexCache();
+                client_->ensureIndex( ns_.c_str(), BSON( ( names + i ) << 1 ), false, names + i );
+            }
+            lk_.reset( new dblock );
+        }
+        void run() {
+            Client::Context ctx( ns_.c_str() );
+            for( int i = 0; i < 10000; ++i )
+                MultiPlanScanner s( ns_.c_str(), BSON( "a" << 1 ), BSONObj() );
+        }
+        string ns_;
+        auto_ptr< dblock > lk_;
+    };
+
+    class All : public RunnerSuite {
+    public:
+        All() : RunnerSuite("plan" ) {}
+        void setupTests() {
+            add< Hint >();
+            add< Sort >();
+            add< Query >();
+        }
+    } all;
+} // namespace Plan
+
+// Miscellaneous micro-benchmarks of time-fetching utilities.
+namespace Misc {
+    // Cost of one million curTimeMicros64() calls.
+    class TimeMicros64 {
+    public:
+        void run() {
+            int iterations = 1000*1000;
+            while(iterations--){
+                curTimeMicros64();
+            }
+        }
+    };
+
+    // Cost of one million jsTime() calls.
+    class JSTime {
+    public:
+        void run() {
+            int iterations = 1000*1000;
+            while(iterations--){
+                jsTime();
+            }
+        }
+    };
+
+    class All : public RunnerSuite {
+    public:
+        All() : RunnerSuite("misc") {}
+        void setupTests() {
+            add< TimeMicros64 >();
+            add< JSTime >();
+        }
+    } all;
+}
+
+int main( int argc, char **argv ) {
+    // Quiet logging so timing output is the only thing printed.
+    logLevel = -1;
+    // In-process client: measures server code without network overhead.
+    client_ = new DBDirectClient();
+
+    return Suite::run(argc, argv, "/data/db/perftest");
+}
+
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
new file mode 100644
index 00000000000..284e3991f15
--- /dev/null
+++ b/src/mongo/dbtests/perftests.cpp
@@ -0,0 +1,1029 @@
+/** @file perftests.cpp.cpp : unit tests relating to performance
+
+ The idea herein is tests that run fast and can be part of the normal CI suite. So no tests herein that take
+ a long time to run. Obviously we need those too, but they will be separate.
+
+ These tests use DBDirectClient; they are a bit white-boxish.
+*/
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include <fstream>
+#include "../db/ops/query.h"
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+#include "../db/lasterror.h"
+#include "../db/ops/update.h"
+#include "../db/taskqueue.h"
+#include "../util/timer.h"
+#include "dbtests.h"
+#include "../db/dur_stats.h"
+#include "../util/checksum.h"
+#include "../util/version.h"
+#include "../db/key.h"
+#include "../util/compress.h"
+
+using namespace bson;
+
+namespace mongo {
+ namespace regression {
+ extern unsigned perfHist;
+ }
+}
+
+namespace PerfTests {
+
+ const bool profiling = false;
+
+ typedef DBDirectClient DBClientType;
+ //typedef DBClientConnection DBClientType;
+
+ class ClientBase {
+ public:
+ // NOTE: Not bothering to backup the old error record.
+ ClientBase() {
+ //_client.connect("localhost");
+ mongo::lastError.reset( new LastError() );
+ }
+ virtual ~ClientBase() {
+ //mongo::lastError.release();
+ }
+ protected:
+ static void insert( const char *ns, BSONObj o ) {
+ _client.insert( ns, o );
+ }
+ static void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
+ _client.update( ns, Query( q ), o, upsert );
+ }
+ static bool error() {
+ return !_client.getPrevError().getField( "err" ).isNull();
+ }
+ DBClientBase &client() const { return _client; }
+ private:
+ static DBClientType _client;
+ };
+ DBClientType ClientBase::_client;
+
+ // todo: use a couple threads. not a very good test yet.
+ class TaskQueueTest {
+ static int tot;
+ struct V {
+ int val;
+ static void go(const V &v) { tot += v.val; }
+ };
+ public:
+ void run() {
+ tot = 0;
+ TaskQueue<V> d;
+ int x = 0;
+ for( int i = 0; i < 100; i++ ) {
+ if( i % 30 == 0 )
+ d.invoke();
+
+ x += i;
+ writelock lk;
+ V v;
+ v.val = i;
+ d.defer(v);
+ }
+ d.invoke();
+ assert( x == tot );
+ }
+ };
+ int TaskQueueTest::tot;
+
+ class B : public ClientBase {
+ string _ns;
+ protected:
+ const char *ns() { return _ns.c_str(); }
+
+ // anything you want to do before being timed
+ virtual void prep() { }
+
+ virtual void timed() = 0;
+
+ // optional 2nd test phase to be timed separately
+ // return name of it
+ virtual string timed2(DBClientBase&) { return ""; }
+
+ virtual void post() { }
+
+ virtual string name() = 0;
+
+ // how long to run test. 0 is a sentinel which means just run the timed() method once and time it.
+ virtual int howLongMillis() { return profiling ? 60000 : 5000; }
+
+ /* override if your test output doesn't need that */
+ virtual bool showDurStats() { return true; }
+
+ static boost::shared_ptr<DBClientConnection> conn;
+ static string _perfhostname;
+ static unsigned once;
+
+ public:
+ /* if you want recording of the timings, place the password for the perf database
+ in ./../settings.py:
+ pstatspassword="<pwd>"
+ */
+ void connect() {
+ if( once )
+ return;
+ ++once;
+
+ // no writing to perf db if _DEBUG
+ DEV return;
+
+ const char *fn = "../../settings.py";
+ if( !exists(fn) ) {
+ if( exists("settings.py") )
+ fn = "settings.py";
+ else {
+ cout << "no ../../settings.py or ./settings.py file found. will not write perf stats to pstats db." << endl;
+ cout << "it is recommended this be enabled even on dev boxes" << endl;
+ return;
+ }
+ }
+
+ try {
+ if( conn == 0 ) {
+ MemoryMappedFile f;
+ const char *p = (const char *) f.mapWithOptions(fn, MongoFile::READONLY);
+ string pwd;
+
+ {
+ const char *q = str::after(p, "pstatspassword=\"");
+ if( *q == 0 ) {
+ cout << "info perftests.cpp: no pstatspassword= in settings.py" << endl;
+ return;
+ }
+ else {
+ pwd = str::before(q, '\"');
+ }
+ }
+
+ boost::shared_ptr<DBClientConnection> c(new DBClientConnection(false, 0, 60));
+ string err;
+ if( c->connect("perfdb.10gen.cc", err) ) {
+ if( !c->auth("perf", "perf", pwd, err) ) {
+ cout << "info: authentication with stats db failed: " << err << endl;
+ assert(false);
+ }
+ conn = c;
+
+ // override the hostname with the buildbot hostname, if present
+ ifstream hostf( "../../info/host" );
+ if ( hostf.good() ) {
+ char buf[1024];
+ hostf.getline(buf, sizeof(buf));
+ _perfhostname = buf;
+ }
+ else {
+ _perfhostname = getHostName();
+ }
+ }
+ else {
+ cout << err << " (to log perfstats)" << endl;
+ }
+ }
+ }
+ catch(...) { }
+ }
+
+ virtual unsigned batchSize() { return 50; }
+
+ void say(unsigned long long n, int ms, string s) {
+ unsigned long long rps = n*1000/ms;
+ cout << "stats " << setw(33) << left << s << ' ' << right << setw(9) << rps << ' ' << right << setw(5) << ms << "ms ";
+ if( showDurStats() )
+ cout << dur::stats.curr->_asCSV();
+ cout << endl;
+
+ connect();
+
+ if( conn && !conn->isFailed() ) {
+ const char *ns = "perf.pstats";
+ if( perfHist ) {
+ static bool needver = true;
+ try {
+ // try to report rps from last time */
+ Query q;
+ {
+ BSONObjBuilder b;
+ b.append("host",_perfhostname).append("test",s).append("dur",cmdLine.dur);
+ DEV { b.append("info.DEBUG",true); }
+ else b.appendNull("info.DEBUG");
+ if( sizeof(int*) == 4 )
+ b.append("info.bits", 32);
+ else
+ b.appendNull("info.bits");
+ q = Query(b.obj()).sort("when",-1);
+ }
+ BSONObj fields = BSON( "rps" << 1 << "info" << 1 );
+ vector<BSONObj> v;
+ conn->findN(v, ns, q, perfHist, 0, &fields);
+ for( vector<BSONObj>::iterator i = v.begin(); i != v.end(); i++ ) {
+ BSONObj o = *i;
+ double lastrps = o["rps"].Number();
+ if( lastrps ) {
+ cout << "stats " << setw(33) << right << "new/old:" << ' ' << setw(9);
+ cout << fixed << setprecision(2) << rps / lastrps;
+ if( needver ) {
+ cout << " " << o.getFieldDotted("info.git").toString();
+ }
+ cout << '\n';
+ }
+ }
+ } catch(...) { }
+ cout.flush();
+ needver = false;
+ }
+ {
+ bob b;
+ b.append("host", _perfhostname);
+ b.appendTimeT("when", time(0));
+ b.append("test", s);
+ b.append("rps", (int) rps);
+ b.append("millis", ms);
+ b.appendBool("dur", cmdLine.dur);
+ if( showDurStats() && cmdLine.dur )
+ b.append("durStats", dur::stats.curr->_asObj());
+ {
+ bob inf;
+ inf.append("version", versionString);
+ if( sizeof(int*) == 4 ) inf.append("bits", 32);
+ DEV inf.append("DEBUG", true);
+#if defined(_WIN32)
+ inf.append("os", "win");
+#endif
+ inf.append("git", gitVersion());
+ inf.append("boost", BOOST_VERSION);
+ b.append("info", inf.obj());
+ }
+ BSONObj o = b.obj();
+ //cout << "inserting " << o.toString() << endl;
+ try {
+ conn->insert(ns, o);
+ }
+ catch ( std::exception& e ) {
+ warning() << "couldn't save perf results: " << e.what() << endl;
+ }
+ }
+ }
+ }
+
+ virtual bool testThreaded() { return false; }
+
+ unsigned long long n;
+
+ void run() {
+ _ns = string("perftest.") + name();
+ client().dropCollection(ns());
+
+ prep();
+
+ int hlm = howLongMillis();
+ DEV {
+ // don't run very long with _DEBUG - not very meaningful anyway on that build
+ hlm = min(hlm, 500);
+ }
+
+ dur::stats._intervalMicros = 0; // no auto rotate
+ dur::stats.curr->reset();
+ mongo::Timer t;
+ n = 0;
+ const unsigned Batch = batchSize();
+
+ if( hlm == 0 ) {
+ // means just do once
+ timed();
+ }
+ else {
+ do {
+ unsigned i;
+ for( i = 0; i < Batch; i++ )
+ timed();
+ n += i;
+ } while( t.micros() < (unsigned) hlm * 1000 );
+ }
+
+ client().getLastError(); // block until all ops are finished
+ int ms = t.millis();
+
+ say(n, ms, name());
+
+ post();
+
+ string test2name = timed2(client());
+ {
+ if( test2name.size() != 0 ) {
+ dur::stats.curr->reset();
+ mongo::Timer t;
+ unsigned long long n = 0;
+ while( 1 ) {
+ unsigned i;
+ for( i = 0; i < Batch; i++ )
+ timed2(client());
+ n += i;
+ if( t.millis() > hlm )
+ break;
+ }
+ int ms = t.millis();
+ say(n, ms, test2name);
+ }
+ }
+
+ if( testThreaded() ) {
+ cout << "testThreaded" << endl;
+ mongo::Timer t;
+ launchThreads(8);
+ //cout << "threaded done " << t.millis() << "ms" << endl;
+ //cout << n * 1000 / t.millis() << " per second" << endl;
+ say(n, t.millis(), test2name+"-threaded");
+
+ }
+ }
+
+ void thread() {
+ DBClientType c;
+ Client::initThreadIfNotAlready("perftestthr");
+ for( unsigned long long i = 0; i < n/8; i++ ) {
+ timed2(c);
+ }
+ cc().shutdown();
+ }
+
+ void launchThreads(int remaining) {
+ if (!remaining)
+ return;
+ boost::thread athread(boost::bind(&B::thread, this));
+ launchThreads(remaining - 1);
+ athread.join();
+ }
+ };
+
+ boost::shared_ptr<DBClientConnection> B::conn;
+ string B::_perfhostname;
+ unsigned B::once;
+
+ unsigned dontOptimizeOutHopefully;
+
+ class NonDurTest : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class BSONIter : public NonDurTest {
+ public:
+ int n;
+ bo b, sub;
+ string name() { return "BSONIter"; }
+ BSONIter() {
+ n = 0;
+ bo sub = bob().appendTimeT("t", time(0)).appendBool("abool", true).appendBinData("somebin", 3, BinDataGeneral, "abc").appendNull("anullone").obj();
+ b = BSON( "_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false << "obj" << sub << "zzzzzzz" << "a string a string" );
+ }
+ void timed() {
+ for( bo::iterator i = b.begin(); i.more(); )
+ if( i.next().fieldName() )
+ n++;
+ for( bo::iterator i = sub.begin(); i.more(); )
+ if( i.next().fieldName() )
+ n++;
+ }
+ };
+
+ class BSONGetFields1 : public NonDurTest {
+ public:
+ int n;
+ bo b, sub;
+ string name() { return "BSONGetFields1By1"; }
+ BSONGetFields1() {
+ n = 0;
+ bo sub = bob().appendTimeT("t", time(0)).appendBool("abool", true).appendBinData("somebin", 3, BinDataGeneral, "abc").appendNull("anullone").obj();
+ b = BSON( "_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false << "obj" << sub << "zzzzzzz" << "a string a string" );
+ }
+ void timed() {
+ if( b["x"].eoo() )
+ n++;
+ if( b["q"].eoo() )
+ n++;
+ if( b["zzz"].eoo() )
+ n++;
+ }
+ };
+
+ class BSONGetFields2 : public BSONGetFields1 {
+ public:
+ string name() { return "BSONGetFields"; }
+ void timed() {
+ static const char *names[] = { "x", "q", "zzz" };
+ BSONElement elements[3];
+ b.getFields(3, names, elements);
+ if( elements[0].eoo() )
+ n++;
+ if( elements[1].eoo() )
+ n++;
+ if( elements[2].eoo() )
+ n++;
+ }
+ };
+
+ class KeyTest : public B {
+ public:
+ KeyV1Owned a,b,c;
+ string name() { return "Key-woequal"; }
+ virtual int howLongMillis() { return 3000; }
+ KeyTest() :
+ a(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqq")),
+ b(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqq")),
+ c(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqqb"))
+ {}
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ assert( a.woEqual(b) );
+ assert( !a.woEqual(c) );
+ }
+ };
+
+ unsigned long long aaa;
+
+ class Timer : public B {
+ public:
+ string name() { return "Timer"; }
+ virtual int howLongMillis() { return 1000; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ mongo::Timer t;
+ aaa += t.millis();
+ }
+ };
+
+ class Sleep0Ms : public B {
+ public:
+ string name() { return "Sleep0Ms"; }
+ virtual int howLongMillis() { return 400; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ sleepmillis(0);
+ mongo::Timer t;
+ aaa++;
+ }
+ };
+
+ RWLock lk("testrw");
+ SimpleMutex m("simptst");
+ mongo::mutex mtest("mtest");
+ SpinLock s;
+
+ class mutexspeed : public B {
+ public:
+ string name() { return "mutex"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ mongo::mutex::scoped_lock lk(mtest);
+ }
+ };
+ class simplemutexspeed : public B {
+ public:
+ string name() { return "simplemutex"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ SimpleMutex::scoped_lock lk(m);
+ }
+ };
+ class spinlockspeed : public B {
+ public:
+ string name() { return "spinlock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ mongo::scoped_spinlock lk(s);
+ }
+ };
+ int cas;
+ class casspeed : public B {
+ public:
+ string name() { return "compareandswap"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+#define RUNCOMPARESWAP 1
+ __sync_bool_compare_and_swap(&cas, 0, 0);
+#endif
+ }
+ };
+ class rlock : public B {
+ public:
+ string name() { return "rlock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ lk.lock_shared();
+ lk.unlock_shared();
+ }
+ };
+ class wlock : public B {
+ public:
+ string name() { return "wlock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ lk.lock();
+ lk.unlock();
+ }
+ };
+
+#if 0
+ class ulock : public B {
+ public:
+ string name() { return "ulock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ lk.lockAsUpgradable();
+ lk.unlockFromUpgradable();
+ }
+ };
+#endif
+
+ class CTM : public B {
+ public:
+ CTM() : last(0), delts(0), n(0) { }
+ string name() { return "curTimeMillis64"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ unsigned long long last;
+ unsigned long long delts;
+ unsigned n;
+ void timed() {
+ unsigned long long x = curTimeMillis64();
+ aaa += x;
+ if( last ) {
+ unsigned long long delt = x-last;
+ if( delt ) {
+ delts += delt;
+ n++;
+ }
+ }
+ last = x;
+ }
+ void post() {
+ // we need to know if timing is highly ungranular - that could be relevant in some places
+ if( n )
+ cout << " avg timer granularity: " << ((double)delts)/n << "ms " << endl;
+ }
+ };
+
+ class Bldr : public B {
+ public:
+ int n;
+ string name() { return "BufBuilder"; }
+ Bldr() {
+ }
+ virtual int howLongMillis() { return 3000; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ BufBuilder b;
+ b.appendNum(3);
+ b.appendUChar(' ');
+ b.appendStr("abcd");
+ n += b.len();
+ }
+ };
+
+ class StkBldr : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ int n;
+ string name() { return "StackBufBuilder"; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ StackBufBuilder b;
+ b.appendNum(3);
+ b.appendUChar(' ');
+ b.appendStr("abcd");
+ n += b.len();
+ }
+ };
+
+ // if a test is this fast, it was optimized out
+ class Dummy : public B {
+ public:
+ Dummy() { }
+ virtual int howLongMillis() { return 3000; }
+ string name() { return "dummy"; }
+ void timed() {
+ dontOptimizeOutHopefully++;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ // test thread local speed
+#if defined(_WIN32)
+ __declspec( thread ) int x;
+ class TLS2 : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ string name() { return "thread-local-storage2"; }
+ void timed() {
+ if( x )
+ dontOptimizeOutHopefully++;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+#endif
+
+ // test thread local speed
+ class TLS : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ string name() { return "thread-local-storage"; }
+ void timed() {
+ if( &cc() )
+ dontOptimizeOutHopefully++;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ bool dummy1 = false;
+
+ class TestException : public DBException {
+ public:
+ TestException() : DBException("testexception",3) { }
+ };
+
+ void foo_throws() {
+ if( dontOptimizeOutHopefully ) {
+ throw TestException();
+ }
+ log() << "hmmm" << endl;
+ }
+
+ class Throw : public B {
+ public:
+ virtual int howLongMillis() { return 2000; }
+ string name() { return "throw"; }
+ void timed() {
+ try {
+ foo_throws();
+ dontOptimizeOutHopefully += 2;
+ }
+ catch(DBException& e) {
+ e.getCode();
+ dontOptimizeOutHopefully++;
+ }
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class New128 : public B {
+ public:
+ virtual int howLongMillis() { return 2000; }
+ string name() { return "new128"; }
+ void timed() {
+ char *p = new char[128];
+ if( dontOptimizeOutHopefully++ > 0 )
+ delete p;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class New8 : public B {
+ public:
+ virtual int howLongMillis() { return 2000; }
+ string name() { return "new8"; }
+ void timed() {
+ char *p = new char[8];
+ if( dontOptimizeOutHopefully++ > 0 )
+ delete p;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class Compress : public B {
+ public:
+ const unsigned sz;
+ void *p;
+ Compress() : sz(1024*1024*100+3) { }
+ virtual unsigned batchSize() { return 1; }
+ string name() { return "compress"; }
+ virtual bool showDurStats() { return false; }
+ virtual int howLongMillis() { return 4000; }
+ void prep() {
+ p = malloc(sz);
+ // this isn't a fair test as it is mostly rands but we just want a rough perf check
+ static int last;
+ for (unsigned i = 0; i<sz; i++) {
+ int r = rand();
+ if( (r & 0x300) == 0x300 )
+ r = last;
+ ((char*)p)[i] = r;
+ last = r;
+ }
+ }
+ size_t last;
+ string res;
+ void timed() {
+ mongo::Timer t;
+ string out;
+ size_t len = compress((const char *) p, sz, &out);
+ bool ok = uncompress(out.c_str(), out.size(), &res);
+ ASSERT(ok);
+ static unsigned once;
+ if( once++ == 0 )
+ cout << "compress round trip " << sz/(1024.0*1024) / (t.millis()/1000.0) << "MB/sec\n";
+ //cout << len / (1024.0/1024) << " compressed" << endl;
+ (void)len; //fix unused error while above line is commented out
+ }
+ void post() {
+ ASSERT( memcmp(res.c_str(), p, sz) == 0 );
+ free(p);
+ }
+ };
+
+ // test speed of checksum method
+ class ChecksumTest : public B {
+ public:
+ const unsigned sz;
+ ChecksumTest() : sz(1024*1024*100+3) { }
+ string name() { return "checksum"; }
+ virtual int howLongMillis() { return 2000; }
+ virtual bool showDurStats() { return false; }
+ virtual unsigned batchSize() { return 1; }
+
+ void *p;
+
+ void prep() {
+ {
+ // the checksum code assumes 'standard' rollover on addition overflows. let's check that:
+ unsigned long long x = 0xffffffffffffffffULL;
+ ASSERT( x+2 == 1 );
+ }
+
+ p = malloc(sz);
+ for (unsigned i = 0; i<sz; i++)
+ ((char*)p)[i] = rand();
+ }
+
+ Checksum last;
+
+ void timed() {
+ static int i;
+ Checksum c;
+ c.gen(p, sz);
+ if( i == 0 )
+ last = c;
+ else if( i == 1 ) {
+ ASSERT( c == last );
+ }
+ }
+ void post() {
+ {
+ mongo::Checksum c;
+ c.gen(p, sz-1);
+ ASSERT( c != last );
+ ((char *&)p)[0]++; // check same data, different order, doesn't give same checksum
+ ((char *&)p)[1]--;
+ c.gen(p, sz);
+ ASSERT( c != last );
+ ((char *&)p)[1]++; // check same data, different order, doesn't give same checksum (different longwords case)
+ ((char *&)p)[8]--;
+ c.gen(p, sz);
+ ASSERT( c != last );
+ }
+ free(p);
+ }
+ };
+
+ class InsertDup : public B {
+ const BSONObj o;
+ public:
+ InsertDup() : o( BSON("_id" << 1) ) { } // dup keys
+ string name() {
+ return "insert-duplicate-_ids";
+ }
+ void prep() {
+ client().insert( ns(), o );
+ }
+ void timed() {
+ client().insert( ns(), o );
+ }
+ void post() {
+ assert( client().count(ns()) == 1 );
+ }
+ };
+
+ class Insert1 : public B {
+ const BSONObj x;
+ OID oid;
+ BSONObj query;
+ public:
+ virtual int howLongMillis() { return 30000; }
+ Insert1() : x( BSON("x" << 99) ) {
+ oid.init();
+ query = BSON("_id" << oid);
+ i = 0;
+ }
+ string name() { return "insert-simple"; }
+ unsigned i;
+ void timed() {
+ BSONObj o = BSON( "_id" << i++ << "x" << 99 );
+ client().insert( ns(), o );
+ //client().insert( ns(), x );
+ }
+ virtual bool testThreaded() { return true; }
+ string timed2(DBClientBase& c) {
+ Query q = QUERY( "_id" << (unsigned) Security::getNonce() % i );
+ c.findOne(ns(), q);
+ //client().findOne(ns(), query);
+ return "findOne_by_id";
+ }
+ void post() {
+#if !defined(_DEBUG)
+ assert( client().count(ns()) > 50 );
+#endif
+ }
+ };
+
+ class InsertBig : public B {
+ BSONObj x;
+ virtual int howLongMillis() {
+ if( sizeof(void*) == 4 )
+ return 1000; // could exceed mmapping if run too long, as this function adds a lot fasta
+ return 5000;
+ }
+ public:
+ InsertBig() {
+ char buf[200000];
+ BSONObjBuilder b;
+ b.append("x", 99);
+ b.appendBinData("bin", 200000, (BinDataType) 129, buf);
+ x = b.obj();
+ }
+ string name() { return "insert-big"; }
+ void timed() {
+ client().insert( ns(), x );
+ }
+ };
+
+ class InsertRandom : public B {
+ public:
+ virtual int howLongMillis() { return profiling ? 30000 : 5000; }
+ string name() { return "random-inserts"; }
+ void prep() {
+ client().insert( ns(), BSONObj() );
+ client().ensureIndex(ns(), BSON("x"<<1));
+ }
+ void timed() {
+ int x = rand();
+ BSONObj y = BSON("x" << x << "y" << rand() << "z" << 33);
+ client().insert(ns(), y);
+ }
+ };
+
+ /** upserts about 32k records and then keeps updating them
+ 2 indexes
+ */
+ class Update1 : public B {
+ public:
+ static int rand() {
+ return std::rand() & 0x7fff;
+ }
+ virtual string name() { return "random-upserts"; }
+ void prep() {
+ client().insert( ns(), BSONObj() );
+ client().ensureIndex(ns(), BSON("x"<<1));
+ }
+ void timed() {
+ int x = rand();
+ BSONObj q = BSON("x" << x);
+ BSONObj y = BSON("x" << x << "y" << rand() << "z" << 33);
+ client().update(ns(), q, y, /*upsert*/true);
+ }
+
+ virtual string timed2(DBClientBase& c) {
+ static BSONObj I = BSON( "$inc" << BSON( "y" << 1 ) );
+
+ // test some $inc's
+
+ int x = rand();
+ BSONObj q = BSON("x" << x);
+ c.update(ns(), q, I);
+
+ return name()+"-inc";
+ }
+ };
+
+ template <typename T>
+ class MoreIndexes : public T {
+ public:
+ string name() { return T::name() + "-more-indexes"; }
+ void prep() {
+ T::prep();
+ this->client().ensureIndex(this->ns(), BSON("y"<<1));
+ this->client().ensureIndex(this->ns(), BSON("z"<<1));
+ }
+ };
+
+ void t() {
+ for( int i = 0; i < 20; i++ ) {
+ sleepmillis(21);
+ string fn = "/tmp/t1";
+ MongoMMF f;
+ unsigned long long len = 1 * 1024 * 1024;
+ assert( f.create(fn, len, /*sequential*/rand()%2==0) );
+ {
+ char *p = (char *) f.getView();
+ assert(p);
+ // write something to the private view as a test
+ strcpy(p, "hello");
+ }
+ if( cmdLine.dur ) {
+ char *w = (char *) f.view_write();
+ strcpy(w + 6, "world");
+ }
+ MongoFileFinder ff;
+ ASSERT( ff.findByPath(fn) );
+ }
+ }
+
+ class All : public Suite {
+ public:
+ All() : Suite( "perf" ) { }
+
+ Result * run( const string& filter ) {
+ boost::thread a(t);
+ Result * res = Suite::run(filter);
+ a.join();
+ return res;
+ }
+
+ void setupTests() {
+ cout
+ << "stats test rps------ time-- "
+ << dur::stats.curr->_CSVHeader() << endl;
+ if( profiling ) {
+ add< New8 >();
+ add< New128 >();
+ }
+ else {
+ add< Dummy >();
+ add< ChecksumTest >();
+ add< Compress >();
+ add< TLS >();
+#if defined(_WIN32)
+ add< TLS2 >();
+#endif
+ add< New8 >();
+ add< New128 >();
+ add< Throw >();
+ add< Timer >();
+ add< Sleep0Ms >();
+ add< rlock >();
+ add< wlock >();
+ //add< ulock >();
+ add< mutexspeed >();
+ add< simplemutexspeed >();
+ add< spinlockspeed >();
+#ifdef RUNCOMPARESWAP
+ add< casspeed >();
+#endif
+ add< CTM >();
+ add< KeyTest >();
+ add< Bldr >();
+ add< StkBldr >();
+ add< BSONIter >();
+ add< BSONGetFields1 >();
+ add< BSONGetFields2 >();
+ add< TaskQueueTest >();
+ add< InsertDup >();
+ add< Insert1 >();
+ add< InsertRandom >();
+ add< MoreIndexes<InsertRandom> >();
+ add< Update1 >();
+ add< MoreIndexes<Update1> >();
+ add< InsertBig >();
+ }
+ }
+ } myall;
+}
diff --git a/src/mongo/dbtests/queryoptimizercursortests.cpp b/src/mongo/dbtests/queryoptimizercursortests.cpp
new file mode 100644
index 00000000000..2d5590db3b7
--- /dev/null
+++ b/src/mongo/dbtests/queryoptimizercursortests.cpp
@@ -0,0 +1,2521 @@
+// queryoptimizertests.cpp : query optimizer unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/queryoptimizer.h"
+#include "../db/queryoptimizercursor.h"
+#include "../db/instance.h"
+#include "../db/ops/delete.h"
+#include "dbtests.h"
+
+namespace mongo {
+ void __forceLinkGeoPlugin();
+ shared_ptr<Cursor> newQueryOptimizerCursor( const char *ns, const BSONObj &query, const BSONObj &order = BSONObj(), bool requireIndex = false );
+} // namespace mongo
+
+namespace QueryOptimizerCursorTests {
+
+ void dropCollection( const char *ns ) {
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( ns, errmsg, result );
+ }
+
+ using boost::shared_ptr;
+
+ class CachedMatchCounterCount {
+ public:
+ void run() {
+ long long aggregateNscanned;
+ CachedMatchCounter c( aggregateNscanned, 0 );
+ ASSERT_EQUALS( 0, c.count() );
+ ASSERT_EQUALS( 0, c.cumulativeCount() );
+
+ c.resetMatch();
+ ASSERT( !c.knowMatch() );
+
+ c.setMatch( false );
+ ASSERT( c.knowMatch() );
+
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 0, c.count() );
+ ASSERT_EQUALS( 0, c.cumulativeCount() );
+
+ c.resetMatch();
+ ASSERT( !c.knowMatch() );
+
+ c.setMatch( true );
+ ASSERT( c.knowMatch() );
+
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 1, c.count() );
+ ASSERT_EQUALS( 1, c.cumulativeCount() );
+
+ // Don't count the same match twice, without checking the document location.
+ c.countMatch( DiskLoc( 1, 1 ) );
+ ASSERT_EQUALS( 1, c.count() );
+ ASSERT_EQUALS( 1, c.cumulativeCount() );
+
+ // Reset and count another match.
+ c.resetMatch();
+ c.setMatch( true );
+ c.countMatch( DiskLoc( 1, 1 ) );
+ ASSERT_EQUALS( 2, c.count() );
+ ASSERT_EQUALS( 2, c.cumulativeCount() );
+ }
+ };
+
+ class CachedMatchCounterAccumulate {
+ public:
+ void run() {
+ long long aggregateNscanned;
+ CachedMatchCounter c( aggregateNscanned, 10 );
+ ASSERT_EQUALS( 0, c.count() );
+ ASSERT_EQUALS( 10, c.cumulativeCount() );
+
+ c.setMatch( true );
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 1, c.count() );
+ ASSERT_EQUALS( 11, c.cumulativeCount() );
+ }
+ };
+
+ class CachedMatchCounterDedup {
+ public:
+ void run() {
+ long long aggregateNscanned;
+ CachedMatchCounter c( aggregateNscanned, 0 );
+
+ c.setCheckDups( true );
+ c.setMatch( true );
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 1, c.count() );
+
+ c.resetMatch();
+ c.setMatch( true );
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 1, c.count() );
+ }
+ };
+
+ class CachedMatchCounterNscanned {
+ public:
+ void run() {
+ long long aggregateNscanned = 5;
+ CachedMatchCounter c( aggregateNscanned, 0 );
+ ASSERT_EQUALS( 0, c.nscanned() );
+ ASSERT_EQUALS( 5, c.aggregateNscanned() );
+
+ c.updateNscanned( 4 );
+ ASSERT_EQUALS( 4, c.nscanned() );
+ ASSERT_EQUALS( 9, c.aggregateNscanned() );
+ }
+ };
+
+ class SmallDupSetUpgrade {
+ public:
+ void run() {
+ SmallDupSet d;
+ for( int i = 0; i < 100; ++i ) {
+ ASSERT( !d.getsetdup( DiskLoc( 0, i ) ) );
+ for( int j = 0; j <= i; ++j ) {
+ ASSERT( d.getdup( DiskLoc( 0, j ) ) );
+ }
+ }
+ }
+ };
+
+ class SmallDupSetUpgradeRead {
+ public:
+ void run() {
+ SmallDupSet d;
+ d.getsetdup( DiskLoc( 0, 0 ) );
+ for( int i = 0; i < 550; ++i ) {
+ ASSERT( d.getdup( DiskLoc( 0, 0 ) ) );
+ }
+ ASSERT( d.getsetdup( DiskLoc( 0, 0 ) ) );
+ }
+ };
+
+ class SmallDupSetUpgradeWrite {
+ public:
+ void run() {
+ SmallDupSet d;
+ for( int i = 0; i < 550; ++i ) {
+ ASSERT( !d.getsetdup( DiskLoc( 0, i ) ) );
+ }
+ for( int i = 0; i < 550; ++i ) {
+ ASSERT( d.getsetdup( DiskLoc( 0, i ) ) );
+ }
+ }
+ };
+
+ class Base {
+ public:
+ Base() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ dropCollection( ns() );
+ }
+ ~Base() {
+ cc().curop()->reset();
+ }
+ protected:
+ DBDirectClient _cli;
+ static const char *ns() { return "unittests.QueryOptimizerTests"; }
+ void setQueryOptimizerCursor( const BSONObj &query, const BSONObj &order = BSONObj() ) {
+ setQueryOptimizerCursorWithoutAdvancing( query, order );
+ if ( ok() && !mayReturnCurrent() ) {
+ advance();
+ }
+ }
+ void setQueryOptimizerCursorWithoutAdvancing( const BSONObj &query, const BSONObj &order = BSONObj() ) {
+ _c = newQueryOptimizerCursor( ns(), query, order, false );
+ }
+ bool ok() const { return _c->ok(); }
+ /** Handles matching and deduping. */
+ bool advance() {
+ while( _c->advance() && !mayReturnCurrent() );
+ return ok();
+ }
+ int itcount() {
+ int ret = 0;
+ while( ok() ) {
+ ++ret;
+ advance();
+ }
+ return ret;
+ }
+ BSONObj current() const { return _c->current(); }
+ DiskLoc currLoc() const { return _c->currLoc(); }
+ void prepareToTouchEarlierIterate() { _c->prepareToTouchEarlierIterate(); }
+ void recoverFromTouchingEarlierIterate() { _c->recoverFromTouchingEarlierIterate(); }
+ bool mayReturnCurrent() {
+// return _c->currentMatches() && !_c->getsetdup( _c->currLoc() );
+ return ( !_c->matcher() || _c->matcher()->matchesCurrent( _c.get() ) ) && !_c->getsetdup( _c->currLoc() );
+ }
+ bool prepareToYield() const { return _c->prepareToYield(); }
+ void recoverFromYield() {
+ _c->recoverFromYield();
+ if ( ok() && !mayReturnCurrent() ) {
+ advance();
+ }
+ }
+ shared_ptr<Cursor> c() { return _c; }
+ long long nscanned() const { return _c->nscanned(); }
+ private:
+ shared_ptr<Cursor> _c;
+ };
+
+ /** No results for empty collection. */
+ class Empty : public Base {
+ public:
+ void run() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSONObj() );
+ ASSERT( !c->ok() );
+ ASSERT_THROWS( c->_current(), AssertionException );
+ ASSERT_THROWS( c->current(), AssertionException );
+ ASSERT( c->currLoc().isNull() );
+ ASSERT( !c->advance() );
+ ASSERT_THROWS( c->currKey(), AssertionException );
+ ASSERT_THROWS( c->getsetdup( DiskLoc() ), AssertionException );
+ ASSERT_THROWS( c->isMultiKey(), AssertionException );
+ ASSERT_THROWS( c->matcher(), AssertionException );
+ }
+ };
+
+ /** Simple table scan. */
+ class Unindexed : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSONObj() );
+ ASSERT_EQUALS( 2, itcount() );
+ }
+ };
+
+ /** Basic test with two indexes and deduping requirement. */
+ class Basic : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ // Each document matches on both the _id and a indexes, but must be
+ // returned exactly once.
+ ASSERT( ok() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 2 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 2 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Cursor is not ok() when no documents match. */
+ class NoMatch : public Base {
+ public:
+ void run() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ // The _id bounds (GT 5, LT 4) are contradictory, so nothing matches.
+ setQueryOptimizerCursor( BSON( "_id" << GT << 5 << LT << 4 << "a" << GT << 0 ) );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Order of results indicates that interleaving is occurring. */
+ class Interleaved : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ // _id 1, 3, 2 is neither _id order (1,2,3) nor a order (3,1,2 by a),
+ // demonstrating the cursor alternates between the two index scans.
+ ASSERT( ok() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 2 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 3 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 2 << "a" << 2 ), current() );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Some values on each index do not match. */
+ class NotMatch : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 10 ) );
+ _cli.insert( ns(), BSON( "_id" << 10 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 11 << "a" << 12 ) );
+ _cli.insert( ns(), BSON( "_id" << 12 << "a" << 11 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Docs 0 and 10 each satisfy only one of the two range predicates and
+ // must be filtered out by the matcher.
+ setQueryOptimizerCursor( BSON( "_id" << GT << 5 << "a" << GT << 5 ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), current() );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** After the first 101 matches for a plan, we stop interleaving the plans. */
+ class StopInterleaving : public Base {
+ public:
+ void run() {
+ // First 101 docs match both plans in the same order.
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ // Remaining docs have a values in descending order, so the a plan
+ // would return them in reverse; _id order confirms takeover by _id.
+ for( int i = 101; i < 200; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << (301-i) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << GT << -1 ) );
+ for( int i = 0; i < 200; ++i ) {
+ ASSERT( ok() );
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Test correct deduping with the takeover cursor. */
+ class TakeoverWithDup : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ // Multikey doc indexed under a:0 (before takeover) and a:300 (after);
+ // it must still be counted only once.
+ _cli.insert( ns(), BSON( "_id" << 500 << "a" << BSON_ARRAY( 0 << 300 ) ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << GT << -1 ) );
+ ASSERT_EQUALS( 102, itcount() );
+ }
+ };
+
+ /** Test usage of matcher with takeover cursor. */
+ class TakeoverWithNonMatches : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ // This doc fails the a < 500 predicate and must be excluded even
+ // though it is encountered after takeover.
+ _cli.insert( ns(), BSON( "_id" << 101 << "a" << 600 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << LT << 500 ) );
+ ASSERT_EQUALS( 101, itcount() );
+ }
+ };
+
+ /** Check deduping of dups within just the takeover cursor. */
+ class TakeoverWithTakeoverDup : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i*2 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << i*2+1 << "a" << 1 ) );
+ }
+ // Multikey doc appears twice (a:2 and a:3) within the takeover phase.
+ _cli.insert( ns(), BSON( "_id" << 202 << "a" << BSON_ARRAY( 2 << 3 ) ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << GT << 0) );
+ ASSERT_EQUALS( 102, itcount() );
+ }
+ };
+
+ /** Basic test with $or query. */
+ class BasicOr : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Each clause matches a distinct document; both are returned.
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 0 ) << BSON( "a" << 1 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+ /** $or first clause empty. */
+ class OrFirstClauseEmpty : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ // {_id:-1} matches nothing; iteration must move on to the second clause.
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << -1 ) << BSON( "a" << 1 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+ /** $or second clause empty. */
+ class OrSecondClauseEmpty : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ // The middle clause {_id:-1} matches nothing and must be skipped
+ // without ending iteration.
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 0 ) << BSON( "_id" << -1 ) << BSON( "a" << 1 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+ /** $or with multiple empty clauses. */
+ class OrMultipleClausesEmpty : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Several non-matching clauses interspersed with matching ones;
+ // results must still come out deduped and in clause order.
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 2 ) << BSON( "_id" << 4 ) << BSON( "_id" << 0 ) << BSON( "_id" << -1 ) << BSON( "_id" << 6 ) << BSON( "a" << 1 ) << BSON( "_id" << 9 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+ /** Check that takeover occurs at proper match count with $or clauses */
+ class TakeoverCountOr : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 60; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 0 ) );
+ }
+ for( int i = 60; i < 120; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 1 ) );
+ }
+ // a descends as _id ascends here, so post-takeover _id order is
+ // distinguishable from a-index order.
+ for( int i = 120; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << (200-i) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "a" << 0 ) << BSON( "a" << 1 ) << BSON( "_id" << GTE << 120 << "a" << GT << 1 ) ) ) );
+ // First two clauses produce 120 matches, past the takeover threshold.
+ for( int i = 0; i < 120; ++i ) {
+ ASSERT( ok() );
+ advance();
+ }
+ // Expect to be scanning on _id index only.
+ for( int i = 120; i < 150; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Takeover just at end of clause. */
+ class TakeoverEndOfOrClause : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 102; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( ns() );
+ // First clause yields 101 matches, triggering takeover exactly as the
+ // clause is exhausted; the second clause must still be iterated.
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 101 ) << BSON( "_id" << 101 ) ) ) );
+ for( int i = 0; i < 102; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Takeover threshold reached one document before the clause ends. */
+ class TakeoverBeforeEndOfOrClause : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 100 ) << BSON( "_id" << 100 ) ) ) );
+ for( int i = 0; i < 101; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Takeover threshold passed within the first clause. */
+ class TakeoverAfterEndOfOrClause : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 103; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 102 ) << BSON( "_id" << 102 ) ) ) );
+ for( int i = 0; i < 103; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ /**
+ * Test matching and deduping done manually by cursor client.
+ * The inline comments trace which candidate plan ({_id:1}, {a:1} or
+ * {$natural:1}) produced each interleaved result.
+ */
+ class ManualMatchingDeduping : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 10 ) );
+ _cli.insert( ns(), BSON( "_id" << 10 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 11 << "a" << 12 ) );
+ _cli.insert( ns(), BSON( "_id" << 12 << "a" << 11 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 5 << "a" << GT << 5 ) );
+ ASSERT( c->ok() );
+
+ // _id 10 {_id:1}
+ ASSERT_EQUALS( 10, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 0 {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 0 {$natural:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {_id:1} -- first real match; getsetdup records the location.
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 12 {a:1}
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 10 {$natural:1}
+ ASSERT_EQUALS( 10, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 12 {_id:1} -- seen before on {a:1}; getsetdup reports the dup.
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {a:1}
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {$natural:1}
+ ASSERT_EQUALS( 11, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+
+ // {_id:1} scan is complete.
+ ASSERT( !c->advance() );
+ ASSERT( !c->ok() );
+
+ // Scan the results again - this time the winning plan has been
+ // recorded, so only the {_id:1} plan runs (no interleaving).
+ c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 5 << "a" << GT << 5 ) );
+ ASSERT( c->ok() );
+
+ // _id 10 {_id:1}
+ ASSERT_EQUALS( 10, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 12 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+
+ // {_id:1} scan complete
+ ASSERT( !c->advance() );
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Curr key must be correct for currLoc for correct matching. */
+ class ManualMatchingUsingCurrKey : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << "a" ) );
+ _cli.insert( ns(), BSON( "_id" << "b" ) );
+ _cli.insert( ns(), BSON( "_id" << "ba" ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Regex match on _id is evaluated against the current key.
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), fromjson( "{_id:/a/}" ) );
+ ASSERT( c->ok() );
+ // "a"
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+ ASSERT( c->ok() );
+
+ // "b" -- does not contain 'a', so the matcher rejects it.
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+ ASSERT( c->ok() );
+
+ // "ba"
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ /** Test matching and deduping done manually by cursor client. */
+ class ManualMatchingDedupingTakeover : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 0 ) );
+ }
+ _cli.insert( ns(), BSON( "_id" << 300 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ // 151 distinct matches spanning the takeover boundary; every one must
+ // pass the matcher and be reported as a non-dup exactly once.
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 300 ) << BSON( "a" << 1 ) ) ) );
+ for( int i = 0; i < 151; ++i ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Test single key matching bounds. */
+ class Singlekey : public Base {
+ public:
+ void run() {
+ // The value is the string "10", outside the numeric range below.
+ _cli.insert( ns(), BSON( "a" << "10" ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), BSON( "a" << GT << 1 << LT << 5 ) );
+ // Two sided bounds work.
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Test multi key matching bounds. */
+ class Multikey : public Base {
+ public:
+ void run() {
+ // Array doc: a:1 satisfies a<3 and a:10 satisfies a>5, so the doc
+ // matches even though the two bounds exclude any single value.
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( 1 << 10 ) ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GT << 5 << LT << 3 ) );
+ // Multi key bounds work.
+ ASSERT( ok() );
+ }
+ };
+
+ /** Add other plans when the recorded one is doing more poorly than expected. */
+ class AddOtherPlans : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 << "b" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 << "b" << 0 ) );
+ // Many docs with a:100 make the {a:1} plan expensive for a:100 queries.
+ for( int i = 100; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 100 << "b" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << 0 << "b" << 0 ) );
+
+ // The single matching doc is produced once by each candidate plan.
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT_EQUALS( BSON( "a" << 1 ), c->indexKeyPattern() );
+
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT_EQUALS( BSON( "b" << 1 ), c->indexKeyPattern() );
+
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ // Unindexed plan
+ ASSERT_EQUALS( BSONObj(), c->indexKeyPattern() );
+ ASSERT( !c->advance() );
+
+ c = newQueryOptimizerCursor( ns(), BSON( "a" << 100 << "b" << 149 ) );
+ // Try {a:1}, which was successful previously.
+ for( int i = 0; i < 12; ++i ) {
+ ASSERT( 149 != c->current().getIntField( "b" ) );
+ ASSERT( c->advance() );
+ }
+ bool sawB1Index = false;
+ do {
+ if ( c->indexKeyPattern() == BSON( "b" << 1 ) ) {
+ ASSERT_EQUALS( 149, c->current().getIntField( "b" ) );
+ // We should try the {b:1} index and only see one result from it.
+ ASSERT( !sawB1Index );
+ sawB1Index = true;
+ }
+ } while ( c->advance() );
+ ASSERT( sawB1Index );
+ }
+ };
+
+ /** Add other plans when the recorded one is doing more poorly than expected, with deletion. */
+ class AddOtherPlansDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 << "b" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 << "b" << 0 ) );
+ for( int i = 100; i < 120; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 100 << "b" << i ) );
+ }
+ // 50 docs sharing b:150, inserted in descending _id order.
+ for( int i = 199; i >= 150; --i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 100 << "b" << 150 ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << 0 << "b" << 0 ) );
+
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT_EQUALS( BSON( "a" << 1 ), c->indexKeyPattern() );
+
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT_EQUALS( BSON( "b" << 1 ), c->indexKeyPattern() );
+
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ // Unindexed plan
+ ASSERT_EQUALS( BSONObj(), c->indexKeyPattern() );
+ ASSERT( !c->advance() );
+
+ c = newQueryOptimizerCursor( ns(), BSON( "a" << 100 << "b" << 150 ) );
+ // Try {a:1}, which was successful previously.
+ for( int i = 0; i < 12; ++i ) {
+ ASSERT( 150 != c->current().getIntField( "b" ) );
+ ASSERT_EQUALS( BSON( "a" << 1 ), c->indexKeyPattern() );
+ ASSERT( c->advance() );
+ }
+ // Now try {b:1} plan.
+ ASSERT_EQUALS( BSON( "b" << 1 ), c->indexKeyPattern() );
+ ASSERT_EQUALS( 150, c->current().getIntField( "b" ) );
+ ASSERT( c->currentMatches() );
+ int id = c->current().getIntField( "_id" );
+ c->advance();
+ // Notify the cursor before/after an out-of-band delete of the
+ // previously-returned document so it can keep its position valid.
+ c->prepareToTouchEarlierIterate();
+ _cli.remove( ns(), BSON( "_id" << id ) );
+ c->recoverFromTouchingEarlierIterate();
+ int count = 1;
+ while( c->ok() ) {
+ if ( c->currentMatches() ) {
+ ++count;
+ int id = c->current().getIntField( "_id" );
+ c->advance();
+ c->prepareToTouchEarlierIterate();
+ _cli.remove( ns(), BSON( "_id" << id ) );
+ c->recoverFromTouchingEarlierIterate();
+ }
+ else {
+ c->advance();
+ }
+ }
+ // All 50 b:150 docs seen despite the concurrent deletes.
+ ASSERT_EQUALS( 50, count );
+ }
+ };
+
+ /**
+ * Add other plans when the recorded one is doing more poorly than expected, with deletion before
+ * and after adding the additional plans.
+ */
+ class AddOtherPlansContinuousDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 << "b" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 << "b" << 0 ) );
+ for( int i = 100; i < 400; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i << "b" << ( 499 - i ) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << GTE << -1 << LTE << 0 << "b" << GTE << -1 << LTE << 0 ) );
+ // Drain the cursor so the winning plan is cached for this pattern.
+ while( c->advance() );
+ // {a:1} plan should be recorded now.
+
+ c = newQueryOptimizerCursor( ns(), BSON( "a" << GTE << 100 << LTE << 400 << "b" << GTE << 100 << LTE << 400 ) );
+ int count = 0;
+ while( c->ok() ) {
+ if ( c->currentMatches() ) {
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ++count;
+ int id = c->current().getIntField( "_id" );
+ c->advance();
+ // Delete each returned doc while the cursor is parked.
+ c->prepareToTouchEarlierIterate();
+ _cli.remove( ns(), BSON( "_id" << id ) );
+ c->recoverFromTouchingEarlierIterate();
+ } else {
+ c->advance();
+ }
+ }
+ // All 300 matching docs counted once; only the two non-matching
+ // docs remain in the collection.
+ ASSERT_EQUALS( 300, count );
+ ASSERT_EQUALS( 2U, _cli.count( ns(), BSONObj() ) );
+ }
+ };
+
+ /** Check $or clause range elimination. */
+ class OrRangeElimination : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ // The second clause {_id:1} is contained in the first {_id:>0}; the
+ // doc must be returned only once.
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << GT << 0 ) << BSON( "_id" << 1 ) ) ) );
+ ASSERT( c->ok() );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ /** Check $or match deduping - in takeover cursor. */
+ class OrDedup : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 140 ) << BSON( "_id" << 145 ) << BSON( "a" << 145 ) ) ) );
+
+ while( c->current().getIntField( "_id" ) < 140 ) {
+ ASSERT( c->advance() );
+ }
+ // Match from second $or clause.
+ ASSERT_EQUALS( 145, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+ // Match from third $or clause.
+ ASSERT_EQUALS( 145, c->current().getIntField( "_id" ) );
+ // $or deduping is handled by the matcher.
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ /** Standard dups with a multikey cursor. */
+ class EarlyDups : public Base {
+ public:
+ void run() {
+ // One doc indexed under three array values; it must count once.
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( 0 << 1 << 200 ) ) );
+ for( int i = 2; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "a" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GT << -1 ) );
+ ASSERT_EQUALS( 149, itcount() );
+ }
+ };
+
+ /** Pop or clause in takeover cursor. */
+ class OrPopInTakeover : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Takeover happens inside the first clause; the remaining single-doc
+ // clauses must still be popped and iterated in order.
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << LTE << 147 ) << BSON( "_id" << 148 ) << BSON( "_id" << 149 ) ) ) );
+ for( int i = 0; i < 150; ++i ) {
+ ASSERT( c->ok() );
+ ASSERT_EQUALS( i, c->current().getIntField( "_id" ) );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /**
+ * Or clause iteration abandoned once full collection scan is performed.
+ * Inline comments trace which plan produced each interleaved result.
+ */
+ class OrCollectionScanAbort : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << BSON_ARRAY( 1 << 2 << 3 << 4 << 5 ) << "b" << 4 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << BSON_ARRAY( 6 << 7 << 8 << 9 << 10 ) << "b" << 4 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "a" << LT << 6 << "b" << 4 ) << BSON( "a" << GTE << 6 << "b" << 4 ) ) ) );
+
+ ASSERT( c->ok() );
+
+ // _id 0 on {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 0 on {$natural:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 0 on {a:1} -- multikey index revisits the same doc.
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 1 on {$natural:1}
+ ASSERT_EQUALS( 1, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 0 on {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // {$natural:1} finished -- a completed collection scan makes the
+ // second $or clause redundant, so iteration ends here.
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Yield cursor with no intervening writes, then continue iteration. */
+ class YieldNoOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ // Yield/recover on an exhausted cursor is a safe no-op.
+ ASSERT( prepareToYield() );
+ recoverFromYield();
+ }
+ }
+ };
+
+ /** Yield cursor and delete current entry. */
+ class YieldDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << 1 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ // The only document is removed while the cursor is yielded.
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( !ok() );
+ ASSERT( !advance() );
+ }
+ }
+ };
+
+ /** Yield cursor and delete current entry, then continue iteration. */
+ class YieldDeleteContinue : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ // Delete the doc the cursor was positioned on while yielded.
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ // Recovery skips the deleted doc and lands on the next one.
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield cursor and delete current entry, then continue iteration further. */
+ class YieldDeleteContinueFurther : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and update current. */
+ class YieldUpdate : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "a" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ // Move the current doc's key from a:1 to a:3 while yielded.
+ _cli.update( ns(), BSON( "a" << 1 ), BSON( "$set" << BSON( "a" << 3 ) ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ // The a:2 doc is next; the updated doc is not returned again at
+ // its new key position.
+ ASSERT_EQUALS( 2, current().getIntField( "a" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and drop collection. */
+ class YieldDrop : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.dropCollection( ns() );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Recovery after a drop leaves the cursor cleanly exhausted.
+ recoverFromYield();
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and drop collection with $or query. */
+ class YieldDropOr : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 1 ) << BSON( "_id" << 2 ) ) ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.dropCollection( ns() );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Unlike the single-clause case, $or recovery after a drop throws.
+ ASSERT_THROWS( recoverFromYield(), MsgAssertionException );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and remove document with $or query. */
+ class YieldRemoveOr : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 1 ) << BSON( "_id" << 2 ) ) ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+ /** Yield and overwrite current in capped collection. */
+ class YieldCappedOverwrite : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "x" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "x" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "x" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ // Insert until the capped collection wraps and the x:1 doc the
+ // cursor was positioned on has been overwritten.
+ int x = 2;
+ while( _cli.count( ns(), BSON( "x" << 1 ) ) > 0 ) {
+ _cli.insert( ns(), BSON( "x" << x++ ) );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Recovery cannot reposition past an overwritten capped doc.
+ ASSERT_THROWS( recoverFromYield(), MsgAssertionException );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and drop unrelated index - see SERVER-2454. */
+ class YieldDropIndex : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << 1 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ // Drop an index the cursor is NOT using.
+ _cli.dropIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with multiple plans active. */
+ class YieldMultiplePlansNoOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Both _id and a predicates keep two candidate plans alive.
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with advance and multiple plans active. */
+ class YieldMultiplePlansAdvanceNoOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 << "a" << 3 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ // Advance before yielding so recovery resumes mid-iteration.
+ advance();
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+        /** Yielding with delete and multiple plans active. */
+        class YieldMultiplePlansDelete : public Base {
+        public:
+            void run() {
+                _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+                _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+                _cli.insert( ns(), BSON( "_id" << 3 << "a" << 4 ) );
+                _cli.insert( ns(), BSON( "_id" << 4 << "a" << 3 ) );
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+                    ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+                    advance();
+                    // Yield while positioned on the document about to be removed.
+                    ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+                    ASSERT( prepareToYield() );
+                }
+
+                _cli.remove( ns(), BSON( "_id" << 2 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    c()->recoverFromYield();
+                    ASSERT( ok() );
+                    // index {a:1} active during yield
+                    ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+                    ASSERT( advance() );
+                    ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+                    ASSERT( advance() );
+                    ASSERT_EQUALS( 4, current().getIntField( "_id" ) );
+                    ASSERT( !advance() );
+                    ASSERT( !ok() );
+                }
+            }
+        };
+
+        /** Yielding with delete, multiple plans active, and $or clause. */
+        class YieldMultiplePlansDeleteOr : public Base {
+        public:
+            void run() {
+                _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+                _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 1 << "a" << 2 ) << BSON( "_id" << 2 << "a" << 1 ) ) ) );
+                    ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+                    ASSERT( prepareToYield() );
+                }
+
+                // Remove the document the cursor was positioned on while yielded.
+                _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    c()->recoverFromYield();
+                    ASSERT( ok() );
+                    // Recovery skips the deleted doc and continues with the second $or clause's match.
+                    ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+                    ASSERT( !advance() );
+                    ASSERT( !ok() );
+                }
+            }
+        };
+
+        /** Yielding with delete, multiple plans active with advancement to the second, and $or clause. */
+        class YieldMultiplePlansDeleteOrAdvance : public Base {
+        public:
+            void run() {
+                _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+                _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 1 << "a" << 2 ) << BSON( "_id" << 2 << "a" << 1 ) ) ) );
+                    ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+                    ASSERT( prepareToYield() );
+                    // advance() after prepareToYield(): the reported position is
+                    // unchanged, presumably because the cursor is already in the
+                    // yielded state -- NOTE(review): confirm intended semantics.
+                    c()->advance();
+                    ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+                }
+
+                _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    c()->recoverFromYield();
+                    ASSERT( ok() );
+                    ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+                    ASSERT( !advance() );
+                    ASSERT( !ok() );
+                }
+            }
+        };
+
+        /** Yielding with multiple plans and capped overwrite. */
+        class YieldMultiplePlansCappedOverwrite : public Base {
+        public:
+            void run() {
+                // Small capped collection so inserts below will overwrite the first doc.
+                _cli.createCollection( ns(), 1000, true );
+                _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+                _cli.ensureIndex( ns(), BSON( "_id" << 1 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+                    ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+                    ASSERT( prepareToYield() );
+                }
+
+                // Insert until the capped collection wraps and overwrites {_id:1}.
+                int i = 1;
+                while( _cli.count( ns(), BSON( "_id" << 1 ) ) > 0 ) {
+                    ++i;
+                    _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+                }
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    recoverFromYield();
+                    ASSERT( ok() );
+                    // {$natural:1} plan does not recover, {_id:1} plan does.
+                    ASSERT( 1 < current().getIntField( "_id" ) );
+                }
+            }
+        };
+
+        /**
+         * Yielding with multiple plans and capped overwrite with unrecoverable cursor
+         * active at time of yield.
+         */
+        class YieldMultiplePlansCappedOverwriteManual : public Base {
+        public:
+            void run() {
+                _cli.createCollection( ns(), 1000, true );
+                _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+                shared_ptr<Cursor> c;
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    c = newQueryOptimizerCursor( ns(), BSON( "a" << GT << 0 << "b" << GT << 0 ) );
+                    // Manually dedup so the same document is seen by both plans.
+                    ASSERT_EQUALS( 1, c->current().getIntField( "a" ) );
+                    ASSERT( !c->getsetdup( c->currLoc() ) );
+                    c->advance();
+                    ASSERT_EQUALS( 1, c->current().getIntField( "a" ) );
+                    ASSERT( c->getsetdup( c->currLoc() ) );
+                    ASSERT( c->prepareToYield() );
+                }
+
+                // Insert until the capped collection wraps and overwrites {a:1}.
+                int i = 1;
+                while( _cli.count( ns(), BSON( "a" << 1 ) ) > 0 ) {
+                    ++i;
+                    _cli.insert( ns(), BSON( "a" << i << "b" << i ) );
+                }
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    c->recoverFromYield();
+                    ASSERT( c->ok() );
+                    // {$natural:1} plan does not recover, {a:1} plan does.
+                    ASSERT( 1 < c->current().getIntField( "a" ) );
+                }
+            }
+        };
+
+        /**
+         * Yielding with multiple plans and capped overwrite with unrecoverable cursor
+         * inactive at time of yield.
+         */
+        class YieldMultiplePlansCappedOverwriteManual2 : public Base {
+        public:
+            void run() {
+                _cli.createCollection( ns(), 1000, true );
+                _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+                _cli.ensureIndex( ns(), BSON( "_id" << 1 ) );
+
+                shared_ptr<Cursor> c;
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+                    ASSERT_EQUALS( 1, c->current().getIntField( "_id" ) );
+                    ASSERT( !c->getsetdup( c->currLoc() ) );
+                    ASSERT( c->prepareToYield() );
+                }
+
+                // Insert until the capped collection wraps and overwrites {_id:1}.
+                int n = 1;
+                while( _cli.count( ns(), BSON( "_id" << 1 ) ) > 0 ) {
+                    ++n;
+                    _cli.insert( ns(), BSON( "_id" << n << "a" << n ) );
+                }
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    c->recoverFromYield();
+                    ASSERT( c->ok() );
+                    // {$natural:1} plan does not recover, {_id:1} plan does.
+                    ASSERT( 1 < c->current().getIntField( "_id" ) );
+                    ASSERT( !c->getsetdup( c->currLoc() ) );
+                    int i = c->current().getIntField( "_id" );
+                    ASSERT( c->advance() );
+                    ASSERT( c->getsetdup( c->currLoc() ) );
+                    // Remaining documents are returned in ascending _id order.
+                    while( i < n ) {
+                        ASSERT( c->advance() );
+                        ++i;
+                        ASSERT_EQUALS( i, c->current().getIntField( "_id" ) );
+                    }
+                }
+            }
+        };
+
+        /** Yield with takeover cursor. */
+        class YieldTakeover : public Base {
+        public:
+            void run() {
+                for( int i = 0; i < 150; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    setQueryOptimizerCursor( BSON( "_id" << GTE << 0 << "a" << GTE << 0 ) );
+                    // Advance past the point where the multi-plan cursor hands
+                    // over to a single plan, then yield.
+                    for( int i = 0; i < 120; ++i ) {
+                        ASSERT( advance() );
+                    }
+                    ASSERT( ok() );
+                    ASSERT_EQUALS( 120, current().getIntField( "_id" ) );
+                    ASSERT( prepareToYield() );
+                }
+
+                // Delete the doc the cursor is positioned on while yielded.
+                _cli.remove( ns(), BSON( "_id" << 120 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    recoverFromYield();
+                    ASSERT( ok() );
+                    ASSERT_EQUALS( 121, current().getIntField( "_id" ) );
+                    ASSERT( advance() );
+                    ASSERT_EQUALS( 122, current().getIntField( "_id" ) );
+                }
+            }
+        };
+
+        /** Yield with BasicCursor takeover cursor. */
+        class YieldTakeoverBasic : public Base {
+        public:
+            void run() {
+                for( int i = 0; i < 150; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "a" << BSON_ARRAY( i << i+1 ) ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+                // Yield through a ClientCursor rather than the cursor directly.
+                auto_ptr<ClientCursor> cc;
+                auto_ptr<ClientCursor::YieldData> data( new ClientCursor::YieldData() );
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    setQueryOptimizerCursor( BSON( "b" << NE << 0 << "a" << GTE << 0 ) );
+                    cc.reset( new ClientCursor( QueryOption_NoCursorTimeout, c(), ns() ) );
+                    for( int i = 0; i < 120; ++i ) {
+                        ASSERT( advance() );
+                    }
+                    ASSERT( ok() );
+                    ASSERT_EQUALS( 120, current().getIntField( "_id" ) );
+                    cc->prepareToYield( *data );
+                }
+                _cli.remove( ns(), BSON( "_id" << 120 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    ASSERT( ClientCursor::recoverFromYield( *data ) );
+                    ASSERT( ok() );
+                    ASSERT_EQUALS( 121, current().getIntField( "_id" ) );
+                    ASSERT( advance() );
+                    ASSERT_EQUALS( 122, current().getIntField( "_id" ) );
+                }
+            }
+        };
+
+        /** Yield with advance of inactive cursor. */
+        class YieldInactiveCursorAdvance : public Base {
+        public:
+            void run() {
+                // _id ascending and a descending, so the two plans return
+                // documents in opposite orders and results interleave.
+                for( int i = 0; i < 10; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "a" << 10 - i ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+                    ASSERT( ok() );
+                    ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+                    ASSERT( advance() );
+                    ASSERT_EQUALS( 9, current().getIntField( "_id" ) );
+                    ASSERT( advance() );
+                    ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+                    ASSERT( prepareToYield() );
+                }
+
+                // Delete the doc the inactive {a:1} plan was positioned on.
+                _cli.remove( ns(), BSON( "_id" << 9 ) );
+
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    recoverFromYield();
+                    ASSERT( ok() );
+                    ASSERT_EQUALS( 8, current().getIntField( "_id" ) );
+                    ASSERT( advance() );
+                    ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+                    ASSERT( advance() );
+                    ASSERT_EQUALS( 7, current().getIntField( "_id" ) );
+                }
+            }
+        };
+
+        /** An ascending _id sort is satisfied by the _id index. */
+        class OrderId : public Base {
+        public:
+            void run() {
+                for( int i = 0; i < 10; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i ) );
+                }
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursor( BSONObj(), BSON( "_id" << 1 ) );
+
+                // Documents are returned in the requested _id order.
+                for( int i = 0; i < 10; ++i, advance() ) {
+                    ASSERT( ok() );
+                    ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+                }
+            }
+        };
+
+        /** An _id sort is satisfied even when multiple candidate indexes exist. */
+        class OrderMultiIndex : public Base {
+        public:
+            void run() {
+                for( int i = 0; i < 10; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "a" << 1 ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursor( BSON( "_id" << GTE << 0 << "a" << GTE << 0 ), BSON( "_id" << 1 ) );
+
+                for( int i = 0; i < 10; ++i, advance() ) {
+                    ASSERT( ok() );
+                    ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+                }
+            }
+        };
+
+        /** Results honor the requested _id sort even though the filter uses the {a:1} index pattern. */
+        class OrderReject : public Base {
+        public:
+            void run() {
+                for( int i = 0; i < 10; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "a" << i % 5 ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursor( BSON( "a" << GTE << 3 ), BSON( "_id" << 1 ) );
+
+                // Matches (a in {3,4}) are _id 3, 4, 8, 9, returned in _id order.
+                ASSERT( ok() );
+                ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+                ASSERT( advance() );
+                ASSERT_EQUALS( 4, current().getIntField( "_id" ) );
+                ASSERT( advance() );
+                ASSERT_EQUALS( 8, current().getIntField( "_id" ) );
+                ASSERT( advance() );
+                ASSERT_EQUALS( 9, current().getIntField( "_id" ) );
+                ASSERT( !advance() );
+            }
+        };
+
+        /** A {$natural:1} sort returns documents in insertion order. */
+        class OrderNatural : public Base {
+        public:
+            void run() {
+                _cli.insert( ns(), BSON( "_id" << 5 ) );
+                _cli.insert( ns(), BSON( "_id" << 4 ) );
+                _cli.insert( ns(), BSON( "_id" << 6 ) );
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursor( BSON( "_id" << GT << 0 ), BSON( "$natural" << 1 ) );
+
+                // Insertion order: 5, 4, 6.
+                ASSERT( ok() );
+                ASSERT_EQUALS( 5, current().getIntField( "_id" ) );
+                ASSERT( advance() );
+                ASSERT_EQUALS( 4, current().getIntField( "_id" ) );
+                ASSERT( advance() );
+                ASSERT_EQUALS( 6, current().getIntField( "_id" ) );
+                ASSERT( !advance() );
+            }
+        };
+
+        /** No cursor is returned when a sort has no supporting index. */
+        class OrderUnindexed : public Base {
+        public:
+            void run() {
+                dblock lk;
+                Client::Context ctx( ns() );
+                ASSERT( !newQueryOptimizerCursor( ns(), BSONObj(), BSON( "a" << 1 ) ).get() );
+            }
+        };
+
+        /** A recorded plan whose index cannot provide the requested sort is not reused. */
+        class RecordedOrderInvalid : public Base {
+        public:
+            void run() {
+                _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+                _cli.insert( ns(), BSON( "a" << 2 << "b" << 2 ) );
+                _cli.insert( ns(), BSON( "a" << 3 << "b" << 3 ) );
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+                // Run the query once so a plan is recorded for this query pattern.
+                ASSERT( _cli.query( ns(), QUERY( "a" << 2 ).sort( "b" ) )->more() );
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << 2 ), BSON( "b" << 1 ) );
+                // Check that we are scanning {b:1} not {a:1}.
+                for( int i = 0; i < 3; ++i ) {
+                    ASSERT( c->ok() );
+                    c->advance();
+                }
+                ASSERT( !c->ok() );
+            }
+        };
+
+        /** Killing the current operation interrupts cursor iteration. */
+        class KillOp : public Base {
+        public:
+            void run() {
+                _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+                _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+                Client::ReadContext ctx( ns() );
+                setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+                ASSERT( ok() );
+                cc().curop()->kill();
+                // First advance() call throws, subsequent calls just fail.
+                ASSERT_THROWS( advance(), MsgAssertionException );
+                ASSERT( !advance() );
+            }
+        };
+
+        /** Killing the current operation interrupts iteration while still in the first $or clause. */
+        class KillOpFirstClause : public Base {
+        public:
+            void run() {
+                _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+                _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+                Client::ReadContext ctx( ns() );
+                shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << GT << 0 ) << BSON( "b" << GT << 0 ) ) ) );
+                ASSERT( c->ok() );
+                cc().curop()->kill();
+                // First advance() call throws, subsequent calls just fail.
+                ASSERT_THROWS( c->advance(), MsgAssertionException );
+                ASSERT( !c->advance() );
+            }
+        };
+
+        /** nscanned reflects the combined work of the competing plans. */
+        class Nscanned : public Base {
+        public:
+            void run() {
+                for( int i = 0; i < 120; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+                }
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "_id" << GTE << 0 << "a" << GTE << 0 ) );
+                ASSERT( c->ok() );
+                // Two plans each scanned one document for the first result.
+                ASSERT_EQUALS( 2, c->nscanned() );
+                c->advance();
+                ASSERT( c->ok() );
+                ASSERT_EQUALS( 2, c->nscanned() );
+                c->advance();
+                // Iterate through the remaining results (120 docs, some seen
+                // by both plans before takeover).
+                for( int i = 3; i < 222; ++i ) {
+                    ASSERT( c->ok() );
+                    c->advance();
+                }
+                ASSERT( !c->ok() );
+            }
+        };
+
+        /* Test 'touching earlier iterate' without doc modifications. */
+        class TouchEarlierIterate : public Base {
+        public:
+            void run() {
+                _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+                _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+                Client::ReadContext ctx( ns() );
+                shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+
+                ASSERT( c->ok() );
+                while( c->ok() ) {
+                    DiskLoc loc = c->currLoc();
+                    BSONObj obj = c->current();
+                    // With no intervening writes, a prepare/recover round trip
+                    // must leave the cursor position unchanged.
+                    c->prepareToTouchEarlierIterate();
+                    c->recoverFromTouchingEarlierIterate();
+                    ASSERT( loc == c->currLoc() );
+                    ASSERT_EQUALS( obj, c->current() );
+                    c->advance();
+                }
+            }
+        };
+
+        /* Test 'touching earlier iterate' with doc modifications. */
+        class TouchEarlierIterateDelete : public Base {
+        public:
+            void run() {
+                _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+                _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+                DiskLoc firstLoc;
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+                ASSERT( ok() );
+                firstLoc = currLoc();
+                ASSERT( c()->advance() );
+                prepareToTouchEarlierIterate();
+
+                // Delete the earlier (already-returned) document.
+                _cli.remove( ns(), BSON( "_id" << 1 ), true );
+
+                recoverFromTouchingEarlierIterate();
+                ASSERT( ok() );
+                // The deleted document's location must not be returned again.
+                while( ok() ) {
+                    ASSERT( firstLoc != currLoc() );
+                    c()->advance();
+                }
+            }
+        };
+
+        /* Test 'touch earlier iterate' with several doc modifications. */
+        class TouchEarlierIterateDeleteMultiple : public Base {
+        public:
+            void run() {
+                for( int i = 1; i < 10; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "b" << i ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+                set<DiskLoc> deleted;
+                int id = 0;
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+                // Delete each returned document behind the cursor; no location
+                // may be returned twice.
+                while( 1 ) {
+                    if ( !ok() ) {
+                        break;
+                    }
+                    ASSERT( deleted.count( currLoc() ) == 0 );
+                    id = current()["_id"].Int();
+                    deleted.insert( currLoc() );
+                    c()->advance();
+                    prepareToTouchEarlierIterate();
+
+                    _cli.remove( ns(), BSON( "_id" << id ), true );
+
+                    recoverFromTouchingEarlierIterate();
+                }
+                ASSERT_EQUALS( 9U, deleted.size() );
+            }
+        };
+
+        /* Test 'touch earlier iterate' with takeover. */
+        class TouchEarlierIterateTakeover : public Base {
+        public:
+            void run() {
+                // 599 docs: enough to trigger takeover from the multi-plan cursor.
+                for( int i = 1; i < 600; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "b" << i ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+                Client::ReadContext ctx( ns() );
+                setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+
+                ASSERT( ok() );
+                int count = 1;
+                while( ok() ) {
+                    DiskLoc loc = currLoc();
+                    BSONObj obj = current();
+                    prepareToTouchEarlierIterate();
+                    recoverFromTouchingEarlierIterate();
+                    ASSERT( loc == currLoc() );
+                    ASSERT_EQUALS( obj, current() );
+                    count += mayReturnCurrent();
+                    c()->advance();
+                }
+                ASSERT_EQUALS( 599, count );
+            }
+        };
+
+        /* Test 'touch earlier iterate' with takeover and deletes. */
+        class TouchEarlierIterateTakeoverDeleteMultiple : public Base {
+        public:
+            void run() {
+                for( int i = 1; i < 600; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "b" << i ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+                set<DiskLoc> deleted;
+                int id = 0;
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursorWithoutAdvancing( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+                // Dedup manually (getsetdup) and delete each doc behind the
+                // cursor; every document must be seen exactly once.
+                while( 1 ) {
+                    if ( !ok() ) {
+                        break;
+                    }
+                    ASSERT( deleted.count( currLoc() ) == 0 );
+                    id = current()["_id"].Int();
+                    ASSERT( c()->currentMatches() );
+                    ASSERT( !c()->getsetdup( currLoc() ) );
+                    deleted.insert( currLoc() );
+                    c()->advance();
+                    prepareToTouchEarlierIterate();
+
+                    _cli.remove( ns(), BSON( "_id" << id ), true );
+
+                    recoverFromTouchingEarlierIterate();
+                }
+                ASSERT_EQUALS( 599U, deleted.size() );
+            }
+        };
+
+        /* Test 'touch earlier iterate' with unindexed cursor takeover and deletes. */
+        class TouchEarlierIterateUnindexedTakeoverDeleteMultiple : public Base {
+        public:
+            void run() {
+                // Multikey values for a and b force the unindexed ($natural) plan.
+                for( int i = 1; i < 600; ++i ) {
+                    _cli.insert( ns(), BSON( "a" << BSON_ARRAY( i << i+1 ) << "b" << BSON_ARRAY( i << i+1 ) << "_id" << i ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+                set<DiskLoc> deleted;
+                int id = 0;
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursorWithoutAdvancing( BSON( "a" << GT << 0 << "b" << GT << 0 ) );
+                while( 1 ) {
+                    if ( !ok() ) {
+                        break;
+                    }
+                    ASSERT( deleted.count( currLoc() ) == 0 );
+                    id = current()["_id"].Int();
+                    ASSERT( c()->currentMatches() );
+                    ASSERT( !c()->getsetdup( currLoc() ) );
+                    deleted.insert( currLoc() );
+                    c()->advance();
+                    prepareToTouchEarlierIterate();
+
+                    _cli.remove( ns(), BSON( "_id" << id ), true );
+
+                    recoverFromTouchingEarlierIterate();
+                }
+                ASSERT_EQUALS( 599U, deleted.size() );
+            }
+        };
+
+        /* Test 'touch earlier iterate' with takeover and deletes, with multiple advances in a row. */
+        class TouchEarlierIterateTakeoverDeleteMultipleMultiAdvance : public Base {
+        public:
+            void run() {
+                for( int i = 1; i < 600; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "b" << i ) );
+                }
+                _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+                set<DiskLoc> deleted;
+                int id = 0;
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+                // Uses the helper advance() (which may advance past dups),
+                // unlike the sibling tests that call c()->advance() directly.
+                while( 1 ) {
+                    if ( !ok() ) {
+                        break;
+                    }
+                    ASSERT( deleted.count( currLoc() ) == 0 );
+                    id = current()["_id"].Int();
+                    ASSERT( c()->currentMatches() );
+                    deleted.insert( currLoc() );
+                    advance();
+                    prepareToTouchEarlierIterate();
+
+                    _cli.remove( ns(), BSON( "_id" << id ), true );
+
+                    recoverFromTouchingEarlierIterate();
+                }
+                ASSERT_EQUALS( 599U, deleted.size() );
+            }
+        };
+
+        /* Test yield recovery failure of component capped cursor. */
+        class InitialCappedWrapYieldRecoveryFailure : public Base {
+        public:
+            void run() {
+                _cli.createCollection( ns(), 1000, true );
+                _cli.insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                setQueryOptimizerCursor( BSON( "x" << GT << 0 ) );
+                ASSERT_EQUALS( 1, current().getIntField( "x" ) );
+
+                ClientCursor::CleanupPointer p;
+                p.reset( new ClientCursor( QueryOption_NoCursorTimeout, c(), ns() ) );
+                ClientCursor::YieldData yieldData;
+                p->prepareToYield( yieldData );
+
+                // Insert until the capped collection wraps and overwrites {x:1}.
+                int x = 2;
+                while( _cli.count( ns(), BSON( "x" << 1 ) ) > 0 ) {
+                    _cli.insert( ns(), BSON( "_id" << x << "x" << x ) );
+                    ++x;
+                }
+
+                // TODO - Might be preferable to return false rather than assert here.
+                ASSERT_THROWS( ClientCursor::recoverFromYield( yieldData ), AssertionException );
+            }
+        };
+
+        /* Test yield recovery failure of takeover capped cursor. */
+        class TakeoverCappedWrapYieldRecoveryFailure : public Base {
+        public:
+            void run() {
+                _cli.createCollection( ns(), 10000, true );
+                for( int i = 0; i < 300; ++i ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "x" << i ) );
+                }
+
+                ClientCursor::CleanupPointer p;
+                ClientCursor::YieldData yieldData;
+                {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    setQueryOptimizerCursor( BSON( "x" << GTE << 0 ) );
+                    // Advance to the last document, past the takeover point.
+                    for( int i = 0; i < 299; ++i ) {
+                        advance();
+                    }
+                    ASSERT_EQUALS( 299, current().getIntField( "x" ) );
+
+                    p.reset( new ClientCursor( QueryOption_NoCursorTimeout, c(), ns() ) );
+                    p->prepareToYield( yieldData );
+                }
+
+                // Insert until the capped collection wraps and overwrites {x:299}.
+                int i = 300;
+                while( _cli.count( ns(), BSON( "x" << 299 ) ) > 0 ) {
+                    _cli.insert( ns(), BSON( "_id" << i << "x" << i ) );
+                    ++i;
+                }
+
+                dblock lk;
+                Client::Context ctx( ns() );
+                // Unlike the component-cursor case above, the takeover cursor
+                // reports recovery failure instead of throwing.
+                ASSERT( !ClientCursor::recoverFromYield( yieldData ) );
+            }
+        };
+
+        namespace GetCursor {
+
+            /**
+             * Base harness for NamespaceDetailsTransient::getCursor() tests.
+             * Subclasses override query()/order()/requireIndex() and verify the
+             * cursor type returned and the documents it yields.
+             */
+            class Base : public QueryOptimizerCursorTests::Base {
+            public:
+                Base() {
+                    // create collection
+                    _cli.insert( ns(), BSON( "_id" << 5 ) );
+                }
+                virtual ~Base() {}
+                void run() {
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    bool simpleEqualityMatch;
+                    if ( expectException() ) {
+                        ASSERT_THROWS( NamespaceDetailsTransient::getCursor( ns(), query(), order(), requireIndex(), &simpleEqualityMatch ), MsgAssertionException );
+                        return;
+                    }
+                    shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), query(), order(), requireIndex(), &simpleEqualityMatch );
+                    ASSERT_EQUALS( expectSimpleEquality(), simpleEqualityMatch );
+                    // Compare only the leading portion of toString(), which
+                    // identifies the cursor type (e.g. "BtreeCursor a_1").
+                    string type = c->toString().substr( 0, expectedType().length() );
+                    ASSERT_EQUALS( expectedType(), type );
+                    check( c );
+                }
+            protected:
+                // Expected cursor type prefix; default is a sentinel that always fails.
+                virtual string expectedType() const { return "TESTDUMMY"; }
+                virtual bool expectException() const { return false; }
+                virtual bool expectSimpleEquality() const { return false; }
+                virtual BSONObj query() const { return BSONObj(); }
+                virtual BSONObj order() const { return BSONObj(); }
+                virtual bool requireIndex() const { return false; }
+                // Default check: the single seed document {_id:5} is returned, unmatched.
+                virtual void check( const shared_ptr<Cursor> &c ) {
+                    ASSERT( c->ok() );
+                    ASSERT( !c->matcher() );
+                    ASSERT_EQUALS( 5, c->current().getIntField( "_id" ) );
+                    ASSERT( !c->advance() );
+                }
+            };
+
+            /** An unconstrained query gets a collection scan. */
+            class NoConstraints : public Base {
+                string expectedType() const { return "BasicCursor"; }
+            };
+
+            /** A simple _id equality uses the _id index. */
+            class SimpleId : public Base {
+            public:
+                SimpleId() {
+                    _cli.insert( ns(), BSON( "_id" << 0 ) );
+                    _cli.insert( ns(), BSON( "_id" << 10 ) );
+                }
+                string expectedType() const { return "BtreeCursor _id_"; }
+                BSONObj query() const { return BSON( "_id" << 5 ); }
+            };
+
+            /** An optimal single-index plan gets a plain btree cursor. */
+            class OptimalIndex : public Base {
+            public:
+                OptimalIndex() {
+                    _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+                    _cli.insert( ns(), BSON( "a" << 5 ) );
+                    _cli.insert( ns(), BSON( "a" << 6 ) );
+                }
+                string expectedType() const { return "BtreeCursor a_1"; }
+                BSONObj query() const { return BSON( "a" << GTE << 5 ); }
+                void check( const shared_ptr<Cursor> &c ) {
+                    ASSERT( c->ok() );
+                    ASSERT( c->matcher() );
+                    ASSERT_EQUALS( 5, c->current().getIntField( "a" ) );
+                    ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+                    ASSERT( c->advance() );
+                    ASSERT_EQUALS( 6, c->current().getIntField( "a" ) );
+                    ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+                    ASSERT( !c->advance() );
+                }
+            };
+
+            /** A single-key index equality is reported as a simple equality match. */
+            class SimpleKeyMatch : public Base {
+            public:
+                SimpleKeyMatch() {
+                    _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+                    _cli.update( ns(), BSONObj(), BSON( "$set" << BSON( "a" << true ) ) );
+                }
+                string expectedType() const { return "BtreeCursor a_1"; }
+                bool expectSimpleEquality() const { return true; }
+                BSONObj query() const { return BSON( "a" << true ); }
+                virtual void check( const shared_ptr<Cursor> &c ) {
+                    ASSERT( c->ok() );
+                    ASSERT_EQUALS( 5, c->current().getIntField( "_id" ) );
+                    ASSERT( !c->advance() );
+                }
+            };
+
+            /** A $near query gets a geo search cursor. */
+            class Geo : public Base {
+            public:
+                Geo() {
+                    _cli.insert( ns(), BSON( "_id" << 44 << "loc" << BSON_ARRAY( 44 << 45 ) ) );
+                    _cli.ensureIndex( ns(), BSON( "loc" << "2d" ) );
+                }
+                string expectedType() const { return "GeoSearchCursor"; }
+                BSONObj query() const { return fromjson( "{ loc : { $near : [50,50] } }" ); }
+                void check( const shared_ptr<Cursor> &c ) {
+                    ASSERT( c->ok() );
+                    ASSERT( c->matcher() );
+                    ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+                    ASSERT_EQUALS( 44, c->current().getIntField( "_id" ) );
+                    ASSERT( !c->advance() );
+                }
+            };
+
+            /** No cursor is available for an unindexed sort. */
+            class OutOfOrder : public QueryOptimizerCursorTests::Base {
+            public:
+                void run() {
+                    _cli.insert( ns(), BSON( "_id" << 5 ) );
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSONObj(), BSON( "b" << 1 ) );
+                    ASSERT( !c );
+                }
+            };
+
+            /** A recorded plan that cannot satisfy the requested sort is rejected. */
+            class BestSavedOutOfOrder : public QueryOptimizerCursorTests::Base {
+            public:
+                void run() {
+                    _cli.insert( ns(), BSON( "_id" << 5 << "b" << BSON_ARRAY( 1 << 2 << 3 << 4 << 5 ) ) );
+                    _cli.insert( ns(), BSON( "_id" << 1 << "b" << 6 ) );
+                    _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+                    // record {_id:1} index for this query
+                    ASSERT( _cli.query( ns(), QUERY( "_id" << GT << 0 << "b" << GT << 0 ).sort( "b" ) )->more() );
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSON( "_id" << GT << 0 << "b" << GT << 0 ), BSON( "b" << 1 ) );
+                    // {_id:1} requires scan and order, so {b:1} must be chosen.
+                    ASSERT( c );
+                    ASSERT_EQUALS( 5, c->current().getIntField( "_id" ) );
+                }
+            };
+
+            /**
+             * If an optimal plan is a candidate, return a cursor for it rather than a QueryOptimizerCursor. Avoid
+             * caching optimal plans since simple cursors will not save a plan anyway (so in the most common case optimal
+             * plans won't be cached) and because this simplifies the implementation for selecting a simple cursor.
+             */
+            class BestSavedOptimal : public QueryOptimizerCursorTests::Base {
+            public:
+                void run() {
+                    _cli.insert( ns(), BSON( "_id" << 1 ) );
+                    _cli.ensureIndex( ns(), BSON( "_id" << 1 << "q" << 1 ) );
+                    // {_id:1} index not recorded for these queries since it is an optimal index.
+                    ASSERT( _cli.query( ns(), QUERY( "_id" << GT << 0 ) )->more() );
+                    ASSERT( _cli.query( ns(), QUERY( "$or" << BSON_ARRAY( BSON( "_id" << GT << 0 ) ) ) )->more() );
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    // Check that no plan was recorded for this query.
+                    ASSERT( BSONObj().woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "_id" << GT << 0 ), true ).pattern() ) ) == 0 );
+                    shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSON( "_id" << GT << 0 ) );
+                    // No need for query optimizer cursor since the plan is optimal.
+                    ASSERT_EQUALS( "BtreeCursor _id_", c->toString() );
+                }
+            };
+
+            /** If no optimal plan is a candidate a QueryOptimizerCursor should be returned, even if a plan has been recorded. */
+            class BestSavedNotOptimal : public QueryOptimizerCursorTests::Base {
+            public:
+                void run() {
+                    _cli.insert( ns(), BSON( "_id" << 1 << "q" << 1 ) );
+                    _cli.ensureIndex( ns(), BSON( "q" << 1 ) );
+                    // Record {_id:1} index for this query
+                    ASSERT( _cli.query( ns(), QUERY( "q" << 1 << "_id" << 1 ) )->more() );
+                    dblock lk;
+                    Client::Context ctx( ns() );
+                    ASSERT( BSON( "_id" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "q" << 1 << "_id" << 1 ), true ).pattern() ) ) == 0 );
+                    shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSON( "q" << 1 << "_id" << 1 ) );
+                    // Need query optimizer cursor since the cached plan is not optimal.
+                    ASSERT_EQUALS( "QueryOptimizerCursor", c->toString() );
+                }
+            };
+
+            /** Multiple candidate indexes yield a QueryOptimizerCursor. */
+            class MultiIndex : public Base {
+            public:
+                MultiIndex() {
+                    _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+                }
+                string expectedType() const { return "QueryOptimizerCursor"; }
+                BSONObj query() const { return BSON( "_id" << GT << 0 << "a" << GT << 0 ); }
+                void check( const shared_ptr<Cursor> &c ) {}
+            };
+
+            /** requireIndex rejects a query that would need a collection scan. */
+            class RequireIndexNoConstraints : public Base {
+                bool requireIndex() const { return true; }
+                bool expectException() const { return true; }
+            };
+
+            /** requireIndex accepts an _id equality served by the _id index. */
+            class RequireIndexSimpleId : public Base {
+                bool requireIndex() const { return true; }
+                string expectedType() const { return "BtreeCursor _id_"; }
+                BSONObj query() const { return BSON( "_id" << 5 ); }
+            };
+
+            /** requireIndex rejects a query on an unindexed field. */
+            class RequireIndexUnindexedQuery : public Base {
+                bool requireIndex() const { return true; }
+                bool expectException() const { return true; }
+                BSONObj query() const { return BSON( "a" << GTE << 5 ); }
+            };
+
+            /** requireIndex accepts a query served by a compound index. */
+            class RequireIndexIndexedQuery : public Base {
+            public:
+                RequireIndexIndexedQuery() {
+                    _cli.insert( ns(), BSON( "_id" << 6 << "a" << 6 << "c" << 4 ) );
+                    _cli.ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 << "c" << 1 ) );
+                }
+                string expectedType() const { return "QueryOptimizerCursor"; }
+                bool requireIndex() const { return true; }
+                BSONObj query() const { return BSON( "a" << GTE << 5 << "c" << 4 ); }
+                void check( const shared_ptr<Cursor> &c ) {
+                    ASSERT( c->ok() );
+                    ASSERT( c->matcher() );
+                    ASSERT_EQUALS( 6, c->current().getIntField( "_id" ) );
+                    ASSERT( !c->advance() );
+                }
+            };
+
+            /** requireIndex accepts a $or query when both clauses are indexed. */
+            class RequireIndexSecondOrClauseIndexed : public Base {
+            public:
+                RequireIndexSecondOrClauseIndexed() {
+                    _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+                    _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+                    _cli.insert( ns(), BSON( "a" << 1 ) );
+                    _cli.insert( ns(), BSON( "b" << 1 ) );
+                }
+                bool requireIndex() const { return true; }
+                string expectedType() const { return "QueryOptimizerCursor"; }
+                BSONObj query() const { return fromjson( "{$or:[{a:1},{b:1}]}" ); }
+                void check( const shared_ptr<Cursor> &c ) {
+                    ASSERT( c->ok() );
+                    ASSERT( c->matcher() );
+                    ASSERT( c->advance() );
+                    ASSERT( !c->advance() ); // 2 matches exactly
+                }
+            };
+
+            /** requireIndex rejects a $or query whose second clause is unindexed. */
+            class RequireIndexSecondOrClauseUnindexed : public Base {
+            public:
+                RequireIndexSecondOrClauseUnindexed() {
+                    _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+                    _cli.insert( ns(), BSON( "a" << 1 ) );
+                }
+                bool requireIndex() const { return true; }
+                bool expectException() const { return true; }
+                BSONObj query() const { return fromjson( "{$or:[{a:1},{b:1}]}" ); }
+            };
+
+            /** An unindexed second $or clause not detected up front throws on advance. */
+            class RequireIndexSecondOrClauseUnindexedUndetected : public Base {
+            public:
+                RequireIndexSecondOrClauseUnindexedUndetected() {
+                    _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+                    _cli.ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ) );
+                    _cli.insert( ns(), BSON( "a" << 1 ) );
+                    _cli.insert( ns(), BSON( "b" << 1 ) );
+                }
+                bool requireIndex() const { return true; }
+                string expectedType() const { return "QueryOptimizerCursor"; }
+                BSONObj query() const { return fromjson( "{$or:[{a:1},{b:1}]}" ); }
+                void check( const shared_ptr<Cursor> &c ) {
+                    ASSERT( c->ok() );
+                    ASSERT( c->matcher() );
+                    // An unindexed cursor is required for the second clause, but is not allowed.
+                    ASSERT_THROWS( c->advance(), MsgAssertionException );
+                }
+            };
+
+        } // namespace GetCursor
+
+ class All : public Suite {
+ public:
+ All() : Suite( "queryoptimizercursor" ) {}
+
+ void setupTests() {
+ __forceLinkGeoPlugin();
+ add<QueryOptimizerCursorTests::CachedMatchCounterCount>();
+ add<QueryOptimizerCursorTests::CachedMatchCounterAccumulate>();
+ add<QueryOptimizerCursorTests::CachedMatchCounterDedup>();
+ add<QueryOptimizerCursorTests::CachedMatchCounterNscanned>();
+ add<QueryOptimizerCursorTests::SmallDupSetUpgrade>();
+ add<QueryOptimizerCursorTests::CachedMatchCounterCount>();
+ add<QueryOptimizerCursorTests::SmallDupSetUpgradeRead>();
+ add<QueryOptimizerCursorTests::SmallDupSetUpgradeWrite>();
+ add<QueryOptimizerCursorTests::Empty>();
+ add<QueryOptimizerCursorTests::Unindexed>();
+ add<QueryOptimizerCursorTests::Basic>();
+ add<QueryOptimizerCursorTests::NoMatch>();
+ add<QueryOptimizerCursorTests::Interleaved>();
+ add<QueryOptimizerCursorTests::NotMatch>();
+ add<QueryOptimizerCursorTests::StopInterleaving>();
+ add<QueryOptimizerCursorTests::TakeoverWithDup>();
+ add<QueryOptimizerCursorTests::TakeoverWithNonMatches>();
+ add<QueryOptimizerCursorTests::TakeoverWithTakeoverDup>();
+ add<QueryOptimizerCursorTests::BasicOr>();
+ add<QueryOptimizerCursorTests::OrFirstClauseEmpty>();
+ add<QueryOptimizerCursorTests::OrSecondClauseEmpty>();
+ add<QueryOptimizerCursorTests::OrMultipleClausesEmpty>();
+ add<QueryOptimizerCursorTests::TakeoverCountOr>();
+ add<QueryOptimizerCursorTests::TakeoverEndOfOrClause>();
+ add<QueryOptimizerCursorTests::TakeoverBeforeEndOfOrClause>();
+ add<QueryOptimizerCursorTests::TakeoverAfterEndOfOrClause>();
+ add<QueryOptimizerCursorTests::ManualMatchingDeduping>();
+ add<QueryOptimizerCursorTests::ManualMatchingUsingCurrKey>();
+ add<QueryOptimizerCursorTests::ManualMatchingDedupingTakeover>();
+ add<QueryOptimizerCursorTests::Singlekey>();
+ add<QueryOptimizerCursorTests::Multikey>();
+ add<QueryOptimizerCursorTests::AddOtherPlans>();
+ add<QueryOptimizerCursorTests::AddOtherPlansDelete>();
+ add<QueryOptimizerCursorTests::AddOtherPlansContinuousDelete>();
+ add<QueryOptimizerCursorTests::OrRangeElimination>();
+ add<QueryOptimizerCursorTests::OrDedup>();
+ add<QueryOptimizerCursorTests::EarlyDups>();
+ add<QueryOptimizerCursorTests::OrPopInTakeover>();
+ add<QueryOptimizerCursorTests::OrCollectionScanAbort>();
+ add<QueryOptimizerCursorTests::YieldNoOp>();
+ add<QueryOptimizerCursorTests::YieldDelete>();
+ add<QueryOptimizerCursorTests::YieldDeleteContinue>();
+ add<QueryOptimizerCursorTests::YieldDeleteContinueFurther>();
+ add<QueryOptimizerCursorTests::YieldUpdate>();
+ add<QueryOptimizerCursorTests::YieldDrop>();
+ add<QueryOptimizerCursorTests::YieldDropOr>();
+ add<QueryOptimizerCursorTests::YieldRemoveOr>();
+ add<QueryOptimizerCursorTests::YieldCappedOverwrite>();
+ add<QueryOptimizerCursorTests::YieldDropIndex>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansNoOp>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansAdvanceNoOp>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansDelete>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansDeleteOr>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansDeleteOrAdvance>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansCappedOverwrite>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansCappedOverwriteManual>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansCappedOverwriteManual2>();
+ add<QueryOptimizerCursorTests::YieldTakeover>();
+ add<QueryOptimizerCursorTests::YieldTakeoverBasic>();
+ add<QueryOptimizerCursorTests::YieldInactiveCursorAdvance>();
+ add<QueryOptimizerCursorTests::OrderId>();
+ add<QueryOptimizerCursorTests::OrderMultiIndex>();
+ add<QueryOptimizerCursorTests::OrderReject>();
+ add<QueryOptimizerCursorTests::OrderNatural>();
+ add<QueryOptimizerCursorTests::OrderUnindexed>();
+ add<QueryOptimizerCursorTests::RecordedOrderInvalid>();
+ add<QueryOptimizerCursorTests::KillOp>();
+ add<QueryOptimizerCursorTests::KillOpFirstClause>();
+ add<QueryOptimizerCursorTests::Nscanned>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterate>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateDelete>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateDeleteMultiple>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateTakeover>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateTakeoverDeleteMultiple>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateUnindexedTakeoverDeleteMultiple>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateTakeoverDeleteMultipleMultiAdvance>();
+ add<QueryOptimizerCursorTests::InitialCappedWrapYieldRecoveryFailure>();
+ add<QueryOptimizerCursorTests::TakeoverCappedWrapYieldRecoveryFailure>();
+ add<QueryOptimizerCursorTests::GetCursor::NoConstraints>();
+ add<QueryOptimizerCursorTests::GetCursor::SimpleId>();
+ add<QueryOptimizerCursorTests::GetCursor::OptimalIndex>();
+ add<QueryOptimizerCursorTests::GetCursor::SimpleKeyMatch>();
+ add<QueryOptimizerCursorTests::GetCursor::Geo>();
+ add<QueryOptimizerCursorTests::GetCursor::OutOfOrder>();
+ add<QueryOptimizerCursorTests::GetCursor::BestSavedOutOfOrder>();
+ add<QueryOptimizerCursorTests::GetCursor::BestSavedOptimal>();
+ add<QueryOptimizerCursorTests::GetCursor::BestSavedNotOptimal>();
+ add<QueryOptimizerCursorTests::GetCursor::MultiIndex>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexNoConstraints>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexSimpleId>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexUnindexedQuery>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexIndexedQuery>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexSecondOrClauseIndexed>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexSecondOrClauseUnindexed>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexSecondOrClauseUnindexedUndetected>();
+ }
+ } myall;
+
+} // namespace QueryOptimizerCursorTests
+
diff --git a/src/mongo/dbtests/queryoptimizertests.cpp b/src/mongo/dbtests/queryoptimizertests.cpp
new file mode 100644
index 00000000000..8da13578b45
--- /dev/null
+++ b/src/mongo/dbtests/queryoptimizertests.cpp
@@ -0,0 +1,1063 @@
+// queryoptimizertests.cpp : query optimizer unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/queryoptimizer.h"
+#include "../db/instance.h"
+#include "../db/ops/count.h"
+#include "../db/ops/query.h"
+#include "../db/ops/delete.h"
+#include "dbtests.h"
+
+
+namespace mongo {
+ extern BSONObj id_obj;
+ void runQuery(Message& m, QueryMessage& q, Message &response ) {
+ CurOp op( &(cc()) );
+ op.ensureStarted();
+ runQuery( m , q , op, response );
+ }
+ void runQuery(Message& m, QueryMessage& q ) {
+ Message response;
+ runQuery( m, q, response );
+ }
+ void __forceLinkGeoPlugin();
+} // namespace mongo
+
+namespace QueryOptimizerTests {
+
+ void dropCollection( const char *ns ) {
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( ns, errmsg, result );
+ }
+
+ namespace QueryPlanTests {
+
+ using boost::shared_ptr;
+
+ class Base {
+ public:
+ Base() : _ctx( ns() ) , indexNum_( 0 ) {
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ }
+ ~Base() {
+ if ( !nsd() )
+ return;
+ dropCollection( ns() );
+ }
+ protected:
+ static const char *ns() { return "unittests.QueryPlanTests"; }
+ static NamespaceDetails *nsd() { return nsdetails( ns() ); }
+ IndexDetails *index( const BSONObj &key ) {
+ stringstream ss;
+ ss << indexNum_++;
+ string name = ss.str();
+ client_.resetIndexCache();
+ client_.ensureIndex( ns(), key, false, name.c_str() );
+ NamespaceDetails *d = nsd();
+ for( int i = 0; i < d->nIndexes; ++i ) {
+ if ( d->idx(i).keyPattern() == key /*indexName() == name*/ || ( d->idx(i).isIdIndex() && IndexDetails::isIdIndexPattern( key ) ) )
+ return &d->idx(i);
+ }
+ assert( false );
+ return 0;
+ }
+ int indexno( const BSONObj &key ) {
+ return nsd()->idxNo( *index(key) );
+ }
+ BSONObj startKey( const QueryPlan &p ) const {
+ return p.frv()->startKey();
+ }
+ BSONObj endKey( const QueryPlan &p ) const {
+ return p.frv()->endKey();
+ }
+ private:
+ dblock lk_;
+ Client::Context _ctx;
+ int indexNum_;
+ static DBDirectClient client_;
+ };
+ DBDirectClient Base::client_;
+
+ // There's a limit of 10 indexes total, make sure not to exceed this in a given test.
+#define INDEXNO(x) nsd()->idxNo( *this->index( BSON(x) ) )
+#define INDEX(x) this->index( BSON(x) )
+ auto_ptr< FieldRangeSetPair > FieldRangeSetPair_GLOBAL;
+#define FRSP(x) ( FieldRangeSetPair_GLOBAL.reset( new FieldRangeSetPair( ns(), x ) ), *FieldRangeSetPair_GLOBAL )
+ auto_ptr< FieldRangeSetPair > FieldRangeSetPair_GLOBAL2;
+#define FRSP2(x) ( FieldRangeSetPair_GLOBAL2.reset( new FieldRangeSetPair( ns(), x ) ), FieldRangeSetPair_GLOBAL2.get() )
+
+ class NoIndex : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), -1, FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSONObj() );
+ ASSERT( !p.optimal() );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !p.exactKeyMatch() );
+ }
+ };
+
+ class SimpleOrder : public Base {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendMinKey( "" );
+ BSONObj start = b.obj();
+ BSONObjBuilder b2;
+ b2.appendMaxKey( "" );
+ BSONObj end = b2.obj();
+
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !startKey( p ).woCompare( start ) );
+ ASSERT( !endKey( p ).woCompare( end ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << 1 ) );
+ ASSERT( !p2.scanAndOrderRequired() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "b" << 1 ) );
+ ASSERT( p3.scanAndOrderRequired() );
+ ASSERT( !startKey( p3 ).woCompare( start ) );
+ ASSERT( !endKey( p3 ).woCompare( end ) );
+ }
+ };
+
+ class MoreIndexThanNeeded : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ }
+ };
+
+ class IndexSigns : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << -1 ) , FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT_EQUALS( 1, p.direction() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
+ ASSERT( p2.scanAndOrderRequired() );
+ ASSERT_EQUALS( 0, p2.direction() );
+ QueryPlan p3( nsd(), indexno( id_obj ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "_id" << 1 ) );
+ ASSERT( !p3.scanAndOrderRequired() );
+ ASSERT_EQUALS( 1, p3.direction() );
+ }
+ };
+
+ class IndexReverse : public Base {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendMinKey( "" );
+ b.appendMaxKey( "" );
+ BSONObj start = b.obj();
+ BSONObjBuilder b2;
+ b2.appendMaxKey( "" );
+ b2.appendMinKey( "" );
+ BSONObj end = b2.obj();
+ QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ),FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT_EQUALS( -1, p.direction() );
+ ASSERT( !startKey( p ).woCompare( start ) );
+ ASSERT( !endKey( p ).woCompare( end ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
+ ASSERT( !p2.scanAndOrderRequired() );
+ ASSERT_EQUALS( -1, p2.direction() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << -1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
+ ASSERT( p3.scanAndOrderRequired() );
+ ASSERT_EQUALS( 0, p3.direction() );
+ }
+ };
+
+ class NoOrder : public Base {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "", 3 );
+ b.appendMinKey( "" );
+ BSONObj start = b.obj();
+ BSONObjBuilder b2;
+ b2.append( "", 3 );
+ b2.appendMaxKey( "" );
+ BSONObj end = b2.obj();
+ QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FRSP( BSON( "a" << 3 ) ), FRSP2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !startKey( p ).woCompare( start ) );
+ ASSERT( !endKey( p ).woCompare( end ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FRSP( BSON( "a" << 3 ) ), FRSP2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
+ ASSERT( !p2.scanAndOrderRequired() );
+ ASSERT( !startKey( p ).woCompare( start ) );
+ ASSERT( !endKey( p ).woCompare( end ) );
+ }
+ };
+
+ class EqualWithOrder : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 4 ) ), FRSP2( BSON( "a" << 4 ) ), BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "b" << 4 ) ), FRSP2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
+ ASSERT( !p2.scanAndOrderRequired() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 4 ) ), FRSP2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
+ ASSERT( p3.scanAndOrderRequired() );
+ }
+ };
+
+ class Optimal : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( p.optimal() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( p2.optimal() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 ) ), FRSP2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p3.optimal() );
+ QueryPlan p4( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 ) ), FRSP2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSON( "a" << 1 ) );
+ ASSERT( !p4.optimal() );
+ QueryPlan p5( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 ) ), FRSP2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSON( "b" << 1 ) );
+ ASSERT( p5.optimal() );
+ QueryPlan p6( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 ) ), FRSP2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSON( "b" << 1 ) );
+ ASSERT( !p6.optimal() );
+ QueryPlan p7( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 << "b" << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << 1 ) ), BSON( "a" << 1 << "b" << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p7.optimal() );
+ QueryPlan p8( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 << "b" << LT << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p8.optimal() );
+ QueryPlan p9( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << 1 << "b" << LT << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p9.optimal() );
+ }
+ };
+
+ class MoreOptimal : public Base {
+ public:
+ void run() {
+ QueryPlan p10( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << 1 ) ), FRSP2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSONObj() );
+ ASSERT( p10.optimal() );
+ QueryPlan p11( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << 1 << "b" << LT << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSONObj() );
+ ASSERT( p11.optimal() );
+ QueryPlan p12( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << LT << 1 ) ), FRSP2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSONObj() );
+ ASSERT( p12.optimal() );
+ QueryPlan p13( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << LT << 1 ) ), FRSP2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p13.optimal() );
+ }
+ };
+
+ class KeyMatch : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p.exactKeyMatch() );
+ QueryPlan p2( nsd(), INDEXNO( "b" << 1 << "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p2.exactKeyMatch() );
+ QueryPlan p3( nsd(), INDEXNO( "b" << 1 << "a" << 1 ), FRSP( BSON( "b" << "z" ) ), FRSP2( BSON( "b" << "z" ) ), BSON( "b" << "z" ), BSON( "a" << 1 ) );
+ ASSERT( !p3.exactKeyMatch() );
+ QueryPlan p4( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FRSP( BSON( "c" << "y" << "b" << "z" ) ), FRSP2( BSON( "c" << "y" << "b" << "z" ) ), BSON( "c" << "y" << "b" << "z" ), BSON( "a" << 1 ) );
+ ASSERT( !p4.exactKeyMatch() );
+ QueryPlan p5( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FRSP( BSON( "c" << "y" << "b" << "z" ) ), FRSP2( BSON( "c" << "y" << "b" << "z" ) ), BSON( "c" << "y" << "b" << "z" ), BSONObj() );
+ ASSERT( !p5.exactKeyMatch() );
+ QueryPlan p6( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FRSP( BSON( "c" << LT << "y" << "b" << GT << "z" ) ), FRSP2( BSON( "c" << LT << "y" << "b" << GT << "z" ) ), BSON( "c" << LT << "y" << "b" << GT << "z" ), BSONObj() );
+ ASSERT( !p6.exactKeyMatch() );
+ QueryPlan p7( nsd(), INDEXNO( "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p7.exactKeyMatch() );
+ QueryPlan p8( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << "y" << "a" << "z" ) ), FRSP2( BSON( "b" << "y" << "a" << "z" ) ), BSON( "b" << "y" << "a" << "z" ), BSONObj() );
+ ASSERT( p8.exactKeyMatch() );
+ QueryPlan p9( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << "z" ) ), FRSP2( BSON( "a" << "z" ) ), BSON( "a" << "z" ), BSON( "a" << 1 ) );
+ ASSERT( p9.exactKeyMatch() );
+ }
+ };
+
+ class MoreKeyMatch : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << "r" << "b" << NE << "q" ) ), FRSP2( BSON( "a" << "r" << "b" << NE << "q" ) ), BSON( "a" << "r" << "b" << NE << "q" ), BSON( "a" << 1 ) );
+ ASSERT( !p.exactKeyMatch() );
+ }
+ };
+
+ class ExactKeyQueryTypes : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << "b" ) ), FRSP2( BSON( "a" << "b" ) ), BSON( "a" << "b" ), BSONObj() );
+ ASSERT( p.exactKeyMatch() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << 4 ) ), FRSP2( BSON( "a" << 4 ) ), BSON( "a" << 4 ), BSONObj() );
+ ASSERT( !p2.exactKeyMatch() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << BSON( "c" << "d" ) ) ), FRSP2( BSON( "a" << BSON( "c" << "d" ) ) ), BSON( "a" << BSON( "c" << "d" ) ), BSONObj() );
+ ASSERT( !p3.exactKeyMatch() );
+ BSONObjBuilder b;
+ b.appendRegex( "a", "^ddd" );
+ BSONObj q = b.obj();
+ QueryPlan p4( nsd(), INDEXNO( "a" << 1 ), FRSP( q ), FRSP2( q ), q, BSONObj() );
+ ASSERT( !p4.exactKeyMatch() );
+ QueryPlan p5( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << "z" << "b" << 4 ) ), FRSP2( BSON( "a" << "z" << "b" << 4 ) ), BSON( "a" << "z" << "b" << 4 ), BSONObj() );
+ ASSERT( !p5.exactKeyMatch() );
+ }
+ };
+
+ class Unhelpful : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 ) ), FRSP2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSONObj() );
+ ASSERT( !p.range( "a" ).nontrivial() );
+ ASSERT( p.unhelpful() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 << "c" << 1 ) ), FRSP2( BSON( "b" << 1 << "c" << 1 ) ), BSON( "b" << 1 << "c" << 1 ), BSON( "a" << 1 ) );
+ ASSERT( !p2.scanAndOrderRequired() );
+ ASSERT( !p2.range( "a" ).nontrivial() );
+ ASSERT( !p2.unhelpful() );
+ QueryPlan p3( nsd(), INDEXNO( "b" << 1 ), FRSP( BSON( "b" << 1 << "c" << 1 ) ), FRSP2( BSON( "b" << 1 << "c" << 1 ) ), BSON( "b" << 1 << "c" << 1 ), BSONObj() );
+ ASSERT( p3.range( "b" ).nontrivial() );
+ ASSERT( !p3.unhelpful() );
+ QueryPlan p4( nsd(), INDEXNO( "b" << 1 << "c" << 1 ), FRSP( BSON( "c" << 1 << "d" << 1 ) ), FRSP2( BSON( "c" << 1 << "d" << 1 ) ), BSON( "c" << 1 << "d" << 1 ), BSONObj() );
+ ASSERT( !p4.range( "b" ).nontrivial() );
+ ASSERT( p4.unhelpful() );
+ }
+ };
+
+ } // namespace QueryPlanTests
+
+ namespace QueryPlanSetTests {
+ class Base {
+ public:
+ Base() : _context( ns() ) {
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ }
+ virtual ~Base() {
+ if ( !nsd() )
+ return;
+ NamespaceDetailsTransient::get_inlock( ns() ).clearQueryCache();
+ dropCollection( ns() );
+ }
+ static void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, BSONObj *fieldsToReturn, int queryOptions, Message &toSend ) {
+ // see query.h for the protocol we are using here.
+ BufBuilder b;
+ int opts = queryOptions;
+ b.appendNum(opts);
+ b.appendStr(ns);
+ b.appendNum(nToSkip);
+ b.appendNum(nToReturn);
+ query.appendSelfToBufBuilder(b);
+ if ( fieldsToReturn )
+ fieldsToReturn->appendSelfToBufBuilder(b);
+ toSend.setData(dbQuery, b.buf(), b.len());
+ }
+ protected:
+ static const char *ns() { return "unittests.QueryPlanSetTests"; }
+ static NamespaceDetails *nsd() { return nsdetails( ns() ); }
+ private:
+ dblock lk_;
+ Client::Context _context;
+ };
+
+ class NoIndexes : public Base {
+ public:
+ void run() {
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class Optimal : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "b_2" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSONObj() );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class NoOptimal : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( 3, s.nPlans() );
+ }
+ };
+
+ class NoSpec : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSONObj() ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSONObj(), BSONObj() );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class HintSpec : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ BSONObj b = BSON( "hint" << BSON( "a" << 1 ) );
+ BSONElement e = b.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class HintName : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ BSONObj b = BSON( "hint" << "a_1" );
+ BSONElement e = b.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class NaturalHint : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ BSONObj b = BSON( "hint" << BSON( "$natural" << 1 ) );
+ BSONElement e = b.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class NaturalSort : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "b_2" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "$natural" << 1 ) );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class BadHint : public Base {
+ public:
+ void run() {
+ BSONObj b = BSON( "hint" << "a_1" );
+ BSONElement e = b.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ ASSERT_THROWS( QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e ),
+ AssertionException );
+ }
+ };
+
+ class Count : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ string err;
+ ASSERT_EQUALS( 0, runCount( ns(), BSON( "query" << BSON( "a" << 4 ) ), err ) );
+ BSONObj one = BSON( "a" << 1 );
+ BSONObj fourA = BSON( "a" << 4 );
+ BSONObj fourB = BSON( "a" << 4 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ ASSERT_EQUALS( 0, runCount( ns(), BSON( "query" << BSON( "a" << 4 ) ), err ) );
+ theDataFileMgr.insertWithObjMod( ns(), fourA );
+ ASSERT_EQUALS( 1, runCount( ns(), BSON( "query" << BSON( "a" << 4 ) ), err ) );
+ theDataFileMgr.insertWithObjMod( ns(), fourB );
+ ASSERT_EQUALS( 2, runCount( ns(), BSON( "query" << BSON( "a" << 4 ) ), err ) );
+ ASSERT_EQUALS( 3, runCount( ns(), BSON( "query" << BSONObj() ), err ) );
+ ASSERT_EQUALS( 3, runCount( ns(), BSON( "query" << BSON( "a" << GT << 0 ) ), err ) );
+ // missing ns
+ ASSERT_EQUALS( -1, runCount( "unittests.missingNS", BSONObj(), err ) );
+ // impossible match
+ ASSERT_EQUALS( 0, runCount( ns(), BSON( "query" << BSON( "a" << GT << 0 << LT << -1 ) ), err ) );
+ }
+ };
+
+ class QueryMissingNs : public Base {
+ public:
+ QueryMissingNs() { log() << "querymissingns starts" << endl; }
+ ~QueryMissingNs() {
+ log() << "end QueryMissingNs" << endl;
+ }
+ void run() {
+ Message m;
+ assembleRequest( "unittests.missingNS", BSONObj(), 0, 0, 0, 0, m );
+ DbMessage d(m);
+ QueryMessage q(d);
+ Message ret;
+ runQuery( m, q, ret );
+ ASSERT_EQUALS( 0, ((QueryResult*)ret.header())->nReturned );
+ }
+
+ };
+
+ class UnhelpfulIndex : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 << "c" << 2 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 << "c" << 2 ), BSONObj() );
+ ASSERT_EQUALS( 2, s.nPlans() );
+ }
+ };
+
+ class SingleException : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( 3, s.nPlans() );
+ bool threw = false;
+ auto_ptr< TestOp > t( new TestOp( true, threw ) );
+ boost::shared_ptr< TestOp > done = s.runOp( *t );
+ ASSERT( threw );
+ ASSERT( done->complete() );
+ ASSERT( done->exception().empty() );
+ ASSERT( !done->error() );
+ }
+ private:
+ class TestOp : public QueryOp {
+ public:
+ TestOp( bool iThrow, bool &threw ) : iThrow_( iThrow ), threw_( threw ), i_(), youThrow_( false ) {}
+ virtual void _init() {}
+ virtual void next() {
+ if ( iThrow_ )
+ threw_ = true;
+ massert( 10408 , "throw", !iThrow_ );
+ if ( ++i_ > 10 )
+ setComplete();
+ }
+ virtual QueryOp *_createChild() const {
+ QueryOp *op = new TestOp( youThrow_, threw_ );
+ youThrow_ = !youThrow_;
+ return op;
+ }
+ virtual bool mayRecordPlan() const { return true; }
+ virtual long long nscanned() { return 0; }
+ private:
+ bool iThrow_;
+ bool &threw_;
+ int i_;
+ mutable bool youThrow_;
+ };
+ };
+
+ class AllException : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( 3, s.nPlans() );
+ auto_ptr< TestOp > t( new TestOp() );
+ boost::shared_ptr< TestOp > done = s.runOp( *t );
+ ASSERT( !done->complete() );
+ ASSERT_EQUALS( "throw", done->exception().msg );
+ ASSERT( done->error() );
+ }
+ private:
+ class TestOp : public QueryOp {
+ public:
+ virtual void _init() {}
+ virtual void next() {
+ massert( 10409 , "throw", false );
+ }
+ virtual QueryOp *_createChild() const {
+ return new TestOp();
+ }
+ virtual bool mayRecordPlan() const { return true; }
+ virtual long long nscanned() { return 0; }
+ };
+ };
+
+ class SaveGoodIndex : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ // No best plan - all must be tried.
+ nPlans( 3 );
+ runQuery();
+ // Best plan selected by query.
+ nPlans( 1 );
+ nPlans( 1 );
+ Helpers::ensureIndex( ns(), BSON( "c" << 1 ), false, "c_1" );
+ // Best plan cleared when new index added.
+ nPlans( 3 );
+ runQuery();
+ // Best plan selected by query.
+ nPlans( 1 );
+
+ {
+ DBDirectClient client;
+ for( int i = 0; i < 334; ++i ) {
+ client.insert( ns(), BSON( "i" << i ) );
+ client.update( ns(), QUERY( "i" << i ), BSON( "i" << i + 1 ) );
+ client.remove( ns(), BSON( "i" << i + 1 ) );
+ }
+ }
+ // Best plan cleared by ~1000 writes.
+ nPlans( 3 );
+
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ NoRecordTestOp original;
+ s.runOp( original );
+ // NoRecordTestOp doesn't record a best plan (test cases where mayRecordPlan() is false).
+ nPlans( 3 );
+
+ BSONObj hint = fromjson( "{hint:{$natural:1}}" );
+ BSONElement hintElt = hint.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp2( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig2( new FieldRangeSetPair( *frsp2 ) );
+ QueryPlanSet s2( ns(), frsp2, frspOrig2, BSON( "a" << 4 ), BSON( "b" << 1 ), true, &hintElt );
+ TestOp newOriginal;
+ s2.runOp( newOriginal );
+ // No plan recorded when a hint is used.
+ nPlans( 3 );
+
+ auto_ptr< FieldRangeSetPair > frsp3( new FieldRangeSetPair( ns(), BSON( "a" << 4 ), true ) );
+ auto_ptr< FieldRangeSetPair > frspOrig3( new FieldRangeSetPair( *frsp3 ) );
+ QueryPlanSet s3( ns(), frsp3, frspOrig3, BSON( "a" << 4 ), BSON( "b" << 1 << "c" << 1 ) );
+ TestOp newerOriginal;
+ s3.runOp( newerOriginal );
+ // Plan recorded was for a different query pattern (different sort spec).
+ nPlans( 3 );
+
+ // Best plan still selected by query after all these other tests.
+ runQuery();
+ nPlans( 1 );
+ }
+ private:
+ void nPlans( int n ) {
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( n, s.nPlans() );
+ }
+ void runQuery() {
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ TestOp original;
+ s.runOp( original );
+ }
+ class TestOp : public QueryOp {
+ public:
+ virtual void _init() {}
+ virtual void next() {
+ setComplete();
+ }
+ virtual QueryOp *_createChild() const {
+ return new TestOp();
+ }
+ virtual bool mayRecordPlan() const { return true; }
+ virtual long long nscanned() { return 0; }
+ };
+ class NoRecordTestOp : public TestOp {
+ virtual bool mayRecordPlan() const { return false; }
+ virtual QueryOp *_createChild() const { return new NoRecordTestOp(); }
+ };
+ };
+
+ // If the chosen plan throws, QueryPlanSet falls back to the remaining candidate
+ // plans, and the plan that eventually succeeds is recorded for the query pattern.
+ class TryAllPlansOnErr : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ // ScanOnlyTestOp only ever completes on the $natural (collection scan) plan,
+ // so that plan ends up recorded as best for this query/sort pattern.
+ ScanOnlyTestOp op;
+ s.runOp( op );
+ pair< BSONObj, long long > best = QueryUtilIndexed::bestIndexForPatterns( s.frsp(), BSON( "b" << 1 ) );
+ ASSERT( fromjson( "{$natural:1}" ).woCompare( best.first ) == 0 );
+ ASSERT_EQUALS( 1, best.second );
+
+ auto_ptr< FieldRangeSetPair > frsp2( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig2( new FieldRangeSetPair( *frsp2 ) );
+ QueryPlanSet s2( ns(), frsp2, frspOrig2, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ // TestOp throws only on the $natural plan; the run must still complete via
+ // another candidate plan despite the recorded $natural preference.
+ TestOp op2;
+ ASSERT( s2.runOp( op2 )->complete() );
+ }
+ private:
+ // Completes on any plan except the collection scan, where it asserts.
+ class TestOp : public QueryOp {
+ public:
+ TestOp() {}
+ virtual void _init() {}
+ virtual void next() {
+ if ( qp().indexKey().firstElementFieldName() == string( "$natural" ) )
+ massert( 10410 , "throw", false );
+ setComplete();
+ }
+ virtual QueryOp *_createChild() const {
+ return new TestOp();
+ }
+ virtual bool mayRecordPlan() const { return true; }
+ virtual long long nscanned() { return 1; }
+ };
+ // Marks itself complete only for the $natural plan, then throws
+ // unconditionally; non-natural plans therefore throw without completing.
+ class ScanOnlyTestOp : public TestOp {
+ virtual void next() {
+ if ( qp().indexKey().firstElement().fieldName() == string( "$natural" ) )
+ setComplete();
+ massert( 10411 , "throw", false );
+ }
+ virtual QueryOp *_createChild() const {
+ return new ScanOnlyTestOp();
+ }
+ };
+ };
+
+ // Helpers::findOne with requireIndex=true throws when no suitable index
+ // exists, and succeeds once one is created.
+ class FindOne : public Base {
+ public:
+ void run() {
+ BSONObj one = BSON( "a" << 1 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ BSONObj result;
+ ASSERT( Helpers::findOne( ns(), BSON( "a" << 1 ), result ) );
+ // requireIndex=true with only an unindexed plan available must throw.
+ ASSERT_THROWS( Helpers::findOne( ns(), BSON( "a" << 1 ), result, true ), AssertionException );
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ ASSERT( Helpers::findOne( ns(), BSON( "a" << 1 ), result, true ) );
+ }
+ };
+
+ // deleteObjects should record the winning index (a_1) and its nscanned
+ // in the per-namespace query-pattern cache.
+ class Delete : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ // Bulk of documents do not match a:1, making the a_1 index selective.
+ for( int i = 0; i < 200; ++i ) {
+ BSONObj two = BSON( "a" << 2 );
+ theDataFileMgr.insertWithObjMod( ns(), two );
+ }
+ BSONObj one = BSON( "a" << 1 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ BSONObj delSpec = BSON( "a" << 1 << "_id" << NE << 0 );
+ deleteObjects( ns(), delSpec, false );
+ // The a_1 index was cached for this pattern, with one key scanned.
+ ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), delSpec, true ).pattern() ) ) == 0 );
+ ASSERT_EQUALS( 1, NamespaceDetailsTransient::get_inlock( ns() ).nScannedForPattern( FieldRangeSet( ns(), delSpec, true ).pattern() ) );
+ }
+ };
+
+ // A justOne delete driven by a collection scan removes the first matching
+ // document in natural (insertion) order — here _id:3, inserted first.
+ class DeleteOneScan : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "_id" << 1 ), false, "_id_1" );
+ BSONObj one = BSON( "_id" << 3 << "a" << 1 );
+ BSONObj two = BSON( "_id" << 2 << "a" << 1 );
+ BSONObj three = BSON( "_id" << 1 << "a" << -1 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ theDataFileMgr.insertWithObjMod( ns(), two );
+ theDataFileMgr.insertWithObjMod( ns(), three );
+ deleteObjects( ns(), BSON( "_id" << GT << 0 << "a" << GT << 0 ), true );
+ // Only _id:3 should have been removed.
+ for( boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() ); c->ok(); c->advance() )
+ ASSERT( 3 != c->current().getIntField( "_id" ) );
+ }
+ };
+
+ // A justOne delete driven by the a_1 index removes the first match in index
+ // order — the document with the smallest a (a:0, _id:2).
+ class DeleteOneIndex : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a" );
+ BSONObj one = BSON( "a" << 2 << "_id" << 0 );
+ BSONObj two = BSON( "a" << 1 << "_id" << 1 );
+ BSONObj three = BSON( "a" << 0 << "_id" << 2 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ theDataFileMgr.insertWithObjMod( ns(), two );
+ theDataFileMgr.insertWithObjMod( ns(), three );
+ deleteObjects( ns(), BSON( "a" << GTE << 0 ), true );
+ // Only _id:2 (lowest a value) should have been removed.
+ for( boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() ); c->ok(); c->advance() )
+ ASSERT( 2 != c->current().getIntField( "_id" ) );
+ }
+ };
+
+ // While a query is still running, alternative plans are tried before the
+ // initial choice finishes; the winner for each query pattern is recorded in
+ // the namespace's plan cache and replaced when a later run favors another plan.
+ class TryOtherPlansBeforeFinish : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ // a descends as b ascends, so b:0 pairs with the largest a values.
+ for( int i = 0; i < 100; ++i ) {
+ for( int j = 0; j < 2; ++j ) {
+ BSONObj temp = BSON( "a" << 100 - i - 1 << "b" << i );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ }
+ }
+ Message m;
+ // Need to return at least 2 records to cause plan to be recorded.
+ assembleRequest( ns(), QUERY( "b" << 0 << "a" << GTE << 0 ).obj, 2, 0, 0, 0, m );
+ {
+ DbMessage d(m);
+ QueryMessage q(d);
+ runQuery( m, q);
+ }
+ // b:0 matches come last in a_1 index order, so the collection scan wins.
+ ASSERT( BSON( "$natural" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ), true ).pattern() ) ) == 0 );
+
+ Message m2;
+ assembleRequest( ns(), QUERY( "b" << 99 << "a" << GTE << 0 ).obj, 2, 0, 0, 0, m2 );
+ {
+ DbMessage d(m2);
+ QueryMessage q(d);
+ runQuery( m2, q);
+ }
+ // b:99 matches come first in a_1 index order, so the index plan wins and
+ // replaces the cached $natural plan for the same query pattern.
+ ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ), true ).pattern() ) ) == 0 );
+ ASSERT_EQUALS( 3, NamespaceDetailsTransient::get_inlock( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ), true ).pattern() ) );
+ }
+ };
+
+ // An $in query over an indexed field visits each interval of the $in set in
+ // index order, forward for an ascending sort and backward for a descending one;
+ // values absent from the collection (11) are skipped.
+ class InQueryIntervals : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ for( int i = 0; i < 10; ++i ) {
+ BSONObj temp = BSON( "a" << i );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ }
+ BSONObj hint = fromjson( "{$hint:{a:1}}" );
+ BSONElement hintElt = hint.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSONObj(), true, &hintElt );
+ QueryPlan qp( nsd(), 1, s.frsp(), s.originalFrsp(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSONObj() );
+ boost::shared_ptr<Cursor> c = qp.newCursor();
+ double expected[] = { 2, 3, 6, 9 };
+ for( int i = 0; i < 4; ++i, c->advance() ) {
+ ASSERT_EQUALS( expected[ i ], c->current().getField( "a" ).number() );
+ }
+ ASSERT( !c->ok() );
+
+ // now check reverse
+ {
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSON( "a" << -1 ), true, &hintElt );
+ QueryPlan qp( nsd(), 1, s.frsp(), s.originalFrsp(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSON( "a" << -1 ) );
+ boost::shared_ptr<Cursor> c = qp.newCursor();
+ double expected[] = { 9, 6, 3, 2 };
+ for( int i = 0; i < 4; ++i, c->advance() ) {
+ ASSERT_EQUALS( expected[ i ], c->current().getField( "a" ).number() );
+ }
+ ASSERT( !c->ok() );
+ }
+ }
+ };
+
+ // With a compound a_1_b_1 index, an equality on a followed by $in on b should
+ // advance through the b intervals in index order, skipping absent values (11).
+ class EqualityThenIn : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ), false, "a_1_b_1" );
+ for( int i = 0; i < 10; ++i ) {
+ BSONObj temp = BSON( "a" << 5 << "b" << i );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ }
+ // The QueryPlan is built directly against the a_1_b_1 index (index #1).
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ) ) );
+ QueryPlan qp( nsd(), 1, *frsp, frsp.get(), fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ), BSONObj() );
+ boost::shared_ptr<Cursor> c = qp.newCursor();
+ double expected[] = { 2, 3, 6, 9 };
+ ASSERT( c->ok() );
+ for( int i = 0; i < 4; ++i, c->advance() ) {
+ ASSERT( c->ok() );
+ ASSERT_EQUALS( expected[ i ], c->current().getField( "b" ).number() );
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ // As EqualityThenIn, but with a range predicate ($gte) rather than equality on
+ // the leading field a; the $in intervals on b must still be visited in order.
+ class NotEqualityThenIn : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ), false, "a_1_b_1" );
+ for( int i = 0; i < 10; ++i ) {
+ BSONObj temp = BSON( "a" << 5 << "b" << i );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ }
+ // The QueryPlan is built directly against the a_1_b_1 index (index #1).
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:{$gte:5},b:{$in:[2,3,6,9,11]}}" ) ) );
+ QueryPlan qp( nsd(), 1, *frsp, frsp.get(), fromjson( "{a:{$gte:5},b:{$in:[2,3,6,9,11]}}" ), BSONObj() );
+ boost::shared_ptr<Cursor> c = qp.newCursor();
+ int matches[] = { 2, 3, 6, 9 };
+ for( int i = 0; i < 4; ++i, c->advance() ) {
+ ASSERT_EQUALS( matches[ i ], c->current().getField( "b" ).number() );
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Exclude special plan candidate if there are btree plan candidates. SERVER-4531 */
+ /** Exclude special plan candidate if there are btree plan candidates. SERVER-4531 */
+ class ExcludeSpecialPlanWhenBtreePlan : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << "2d" ), false, "a_2d" );
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ BSONObj query = BSON( "a" << BSON_ARRAY( 0 << 0 ) << "b" << 1 );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), query ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, query, BSONObj() );
+ // Two query plans, btree and collection scan.
+ ASSERT_EQUALS( 2, s.nPlans() );
+ // Not the geo plan.
+ ASSERT( s.firstPlan()->special().empty() );
+ }
+ };
+
+ /** Exclude unindexed plan candidate if there is a special plan candidate. SERVER-4531 */
+ /** Exclude unindexed plan candidate if there is a special plan candidate. SERVER-4531 */
+ class ExcludeUnindexedPlanWhenSpecialPlan : public Base {
+ public:
+ void run() {
+ // Only the 2d (special) index exists, so the geo plan should be the
+ // sole candidate, with no collection-scan fallback.
+ Helpers::ensureIndex( ns(), BSON( "a" << "2d" ), false, "a_2d" );
+ BSONObj query = BSON( "a" << BSON_ARRAY( 0 << 0 ) << "b" << 1 );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), query ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, query, BSONObj() );
+ // Single query plan.
+ ASSERT_EQUALS( 1, s.nPlans() );
+ // It's the geo plan.
+ ASSERT( !s.firstPlan()->special().empty() );
+ }
+ };
+
+ } // namespace QueryPlanSetTests
+
+ // Fixture for the remaining tests: holds the global write lock and a client
+ // context for the test namespace, creating the collection up front and
+ // dropping it (if it still exists) on destruction.
+ class Base {
+ public:
+ Base() : _ctx( ns() ) {
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ }
+ ~Base() {
+ // nsd() is null if the collection was already dropped by the test.
+ if ( !nsd() )
+ return;
+ dropCollection( ns() );
+ }
+ protected:
+ static const char *ns() { return "unittests.QueryOptimizerTests"; }
+ static NamespaceDetails *nsd() { return nsdetails( ns() ); }
+ private:
+ dblock lk_;
+ Client::Context _ctx;
+ };
+
+ // bestGuessCursor should prefer an index matching the sort spec, both for
+ // plain queries and for $or queries (where a MultiCursor is returned), and
+ // should ignore a cached plan registered for the pattern.
+ class BestGuess : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ BSONObj temp = BSON( "a" << 1 );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ temp = BSON( "b" << 1 );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+
+ // The index matching the sort spec wins over the one matching the query.
+ boost::shared_ptr< Cursor > c = bestGuessCursor( ns(), BSON( "b" << 1 ), BSON( "a" << 1 ) );
+ ASSERT_EQUALS( string( "a" ), c->indexKeyPattern().firstElement().fieldName() );
+ c = bestGuessCursor( ns(), BSON( "a" << 1 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( string( "b" ), c->indexKeyPattern().firstElementFieldName() );
+ boost::shared_ptr< MultiCursor > m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{b:1,$or:[{z:1}]}" ), BSON( "a" << 1 ) ) );
+ ASSERT_EQUALS( string( "a" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
+ m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{a:1,$or:[{y:1}]}" ), BSON( "b" << 1 ) ) );
+ ASSERT_EQUALS( string( "b" ), m->sub_c()->indexKeyPattern().firstElementFieldName() );
+
+ // NOTE(review): literal "ns" here rather than ns(); presumably the query
+ // pattern is namespace-independent — confirm against FieldRangeSet::pattern.
+ FieldRangeSet frs( "ns", BSON( "a" << 1 ), true );
+ {
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( frs.pattern( BSON( "b" << 1 ) ), BSON( "a" << 1 ), 0 );
+ }
+ // The cached a_1 plan for this pattern does not override the sort-based guess.
+ m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{a:1,$or:[{y:1}]}" ), BSON( "b" << 1 ) ) );
+ ASSERT_EQUALS( string( "b" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
+ }
+ };
+
+ // A $or query combined with an unindexed sort cannot be satisfied by
+ // bestGuessCursor and must raise a MsgAssertionException.
+ class BestGuessOrSortAssertion : public Base {
+ public:
+ void run() {
+ ASSERT_THROWS( bestGuessCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "b" << 1 ) ) ), BSON( "a" << 1 ) ), MsgAssertionException );
+ }
+ };
+
+ // Registers every test in this file under the "queryoptimizer" suite.
+ class All : public Suite {
+ public:
+ All() : Suite( "queryoptimizer" ) {}
+
+ void setupTests() {
+ // Force the geo module to link so 2d-index tests can run.
+ __forceLinkGeoPlugin();
+ add<QueryPlanTests::NoIndex>();
+ add<QueryPlanTests::SimpleOrder>();
+ add<QueryPlanTests::MoreIndexThanNeeded>();
+ add<QueryPlanTests::IndexSigns>();
+ add<QueryPlanTests::IndexReverse>();
+ add<QueryPlanTests::NoOrder>();
+ add<QueryPlanTests::EqualWithOrder>();
+ add<QueryPlanTests::Optimal>();
+ add<QueryPlanTests::MoreOptimal>();
+ add<QueryPlanTests::KeyMatch>();
+ add<QueryPlanTests::MoreKeyMatch>();
+ add<QueryPlanTests::ExactKeyQueryTypes>();
+ add<QueryPlanTests::Unhelpful>();
+ add<QueryPlanSetTests::NoIndexes>();
+ add<QueryPlanSetTests::Optimal>();
+ add<QueryPlanSetTests::NoOptimal>();
+ add<QueryPlanSetTests::NoSpec>();
+ add<QueryPlanSetTests::HintSpec>();
+ add<QueryPlanSetTests::HintName>();
+ add<QueryPlanSetTests::NaturalHint>();
+ add<QueryPlanSetTests::NaturalSort>();
+ add<QueryPlanSetTests::BadHint>();
+ add<QueryPlanSetTests::Count>();
+ add<QueryPlanSetTests::QueryMissingNs>();
+ add<QueryPlanSetTests::UnhelpfulIndex>();
+ add<QueryPlanSetTests::SingleException>();
+ add<QueryPlanSetTests::AllException>();
+ add<QueryPlanSetTests::SaveGoodIndex>();
+ add<QueryPlanSetTests::TryAllPlansOnErr>();
+ add<QueryPlanSetTests::FindOne>();
+ add<QueryPlanSetTests::Delete>();
+ add<QueryPlanSetTests::DeleteOneScan>();
+ add<QueryPlanSetTests::DeleteOneIndex>();
+ add<QueryPlanSetTests::TryOtherPlansBeforeFinish>();
+ add<QueryPlanSetTests::InQueryIntervals>();
+ add<QueryPlanSetTests::EqualityThenIn>();
+ add<QueryPlanSetTests::NotEqualityThenIn>();
+ add<QueryPlanSetTests::ExcludeSpecialPlanWhenBtreePlan>();
+ add<QueryPlanSetTests::ExcludeUnindexedPlanWhenSpecialPlan>();
+ add<BestGuess>();
+ add<BestGuessOrSortAssertion>();
+ }
+ } myall;
+
+} // namespace QueryOptimizerTests
+
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
new file mode 100644
index 00000000000..9416ae20723
--- /dev/null
+++ b/src/mongo/dbtests/querytests.cpp
@@ -0,0 +1,1408 @@
+// querytests.cpp : query.{h,cpp} unit tests.
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/ops/query.h"
+#include "../db/dbhelpers.h"
+#include "../db/clientcursor.h"
+
+#include "../db/instance.h"
+#include "../db/json.h"
+#include "../db/lasterror.h"
+
+#include "../util/timer.h"
+
+#include "dbtests.h"
+
+namespace mongo {
+ extern int __findingStartInitialTimeout;
+}
+
+namespace QueryTests {
+
+ // Fixture for querytests: holds the global lock and a context on the test
+ // namespace, pre-creates an index on {a:1}, and in the destructor removes all
+ // records and drops non-_id indexes so each test starts clean.
+ class Base {
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base() : _context( ns() ) {
+ addIndex( fromjson( "{\"a\":1}" ) );
+ }
+ ~Base() {
+ try {
+ // Collect locations first: deleting while iterating would
+ // invalidate the cursor.
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() );
+ vector< DiskLoc > toDelete;
+ for(; c->ok(); c->advance() )
+ toDelete.push_back( c->currLoc() );
+ for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
+ theDataFileMgr.deleteRecord( ns(), i->rec(), *i, false );
+ DBDirectClient cl;
+ cl.dropIndexes( ns() );
+ }
+ catch ( ... ) {
+ FAIL( "Exception while cleaning up collection" );
+ }
+ }
+ protected:
+ static const char *ns() {
+ return "unittests.querytests";
+ }
+ // Creates an index by inserting its spec directly into system.indexes.
+ static void addIndex( const BSONObj &key ) {
+ BSONObjBuilder b;
+ b.append( "name", key.firstElementFieldName() );
+ b.append( "ns", ns() );
+ b.append( "key", key );
+ BSONObj o = b.done();
+ stringstream indexNs;
+ indexNs << "unittests.system.indexes";
+ theDataFileMgr.insert( indexNs.str().c_str(), o.objdata(), o.objsize() );
+ }
+ static void insert( const char *s ) {
+ insert( fromjson( s ) );
+ }
+ static void insert( const BSONObj &o ) {
+ theDataFileMgr.insert( ns(), o.objdata(), o.objsize() );
+ }
+ };
+
+ // The object-returning and location-returning overloads of Helpers::findOne
+ // must agree on a $or query.
+ class FindOne : public Base {
+ public:
+ void run() {
+ addIndex( BSON( "b" << 1 ) );
+ addIndex( BSON( "c" << 1 ) );
+ insert( BSON( "b" << 2 << "_id" << 0 ) );
+ insert( BSON( "c" << 3 << "_id" << 1 ) );
+ BSONObj query = fromjson( "{$or:[{b:2},{c:3}]}" );
+ BSONObj ret;
+ // Check findOne() returning object.
+ ASSERT( Helpers::findOne( ns(), query, ret, true ) );
+ ASSERT_EQUALS( string( "b" ), ret.firstElement().fieldName() );
+ // Cross check with findOne() returning location.
+ ASSERT_EQUALS( ret, Helpers::findOne( ns(), query, true ).obj() );
+ }
+ };
+
+ // Helpers::findOne with requireIndex: unindexed scans succeed only when
+ // requireIndex is false; once an index covering the query exists,
+ // requireIndex=true must also succeed.
+ class FindOneRequireIndex : public Base {
+ public:
+ void run() {
+ insert( BSON( "b" << 2 << "_id" << 0 ) );
+ BSONObj query = fromjson( "{b:2}" );
+ BSONObj ret;
+
+ // Check findOne() returning object, allowing unindexed scan.
+ ASSERT( Helpers::findOne( ns(), query, ret, false ) );
+ // Check findOne() returning location, allowing unindexed scan.
+ ASSERT_EQUALS( ret, Helpers::findOne( ns(), query, false ).obj() );
+
+ // Check findOne() returning object, requiring indexed scan without index.
+ ASSERT_THROWS( Helpers::findOne( ns(), query, ret, true ), MsgAssertionException );
+ // Check findOne() returning location, requiring indexed scan without index.
+ ASSERT_THROWS( Helpers::findOne( ns(), query, true ), MsgAssertionException );
+
+ addIndex( BSON( "b" << 1 ) );
+ // Check findOne() returning object, requiring indexed scan with index.
+ ASSERT( Helpers::findOne( ns(), query, ret, true ) );
+ // Check findOne() returning location, requiring indexed scan with index.
+ ASSERT_EQUALS( ret, Helpers::findOne( ns(), query, true ).obj() );
+ }
+ };
+
+ class FindOneEmptyObj : public Base {
+ public:
+ void run() {
+ // We don't normally allow empty objects in the database, but test that we can find
+ // an empty object (one might be allowed inside a reserved namespace at some point).
+ dblock lk;
+ Client::Context ctx( "unittests.querytests" );
+ // Set up security so godinsert command can run.
+ cc().getAuthenticationInfo()->isLocalHost = true;
+ DBDirectClient cl;
+ BSONObj info;
+ ASSERT( cl.runCommand( "unittests", BSON( "godinsert" << "querytests" << "obj" << BSONObj() ), info ) );
+ // NOTE(review): this raw insert adds a second empty object on top of the
+ // godinsert above; presumably redundant for the assertions below — confirm.
+ insert( BSONObj() );
+ BSONObj query;
+ BSONObj ret;
+ ASSERT( Helpers::findOne( ns(), query, ret, false ) );
+ ASSERT( ret.isEmpty() );
+ ASSERT_EQUALS( ret, Helpers::findOne( ns(), query, false ).obj() );
+ }
+ };
+
+ // Fixture providing a shared DBDirectClient plus last-error helpers; resets
+ // the thread's LastError on construction.
+ class ClientBase {
+ public:
+ ClientBase() {
+ mongo::lastError.reset( new LastError() );
+ }
+ ~ClientBase() {
+ //mongo::lastError.release();
+ }
+ protected:
+ static void insert( const char *ns, BSONObj o ) {
+ client_.insert( ns, o );
+ }
+ static void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
+ client_.update( ns, Query( q ), o, upsert );
+ }
+ // True if the previous operation set an error.
+ static bool error() {
+ return !client_.getPrevError().getField( "err" ).isNull();
+ }
+ DBDirectClient &client() const { return client_; }
+
+ static DBDirectClient client_;
+ };
+ DBDirectClient ClientBase::client_;
+
+ // A {$lt: MaxKey} bound should match every document, both with a collection
+ // scan and when hinted onto the a_1 index.
+ class BoundedKey : public ClientBase {
+ public:
+ ~BoundedKey() {
+ client().dropCollection( "unittests.querytests.BoundedKey" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.BoundedKey";
+ insert( ns, BSON( "a" << 1 ) );
+ BSONObjBuilder a;
+ a.appendMaxKey( "$lt" );
+ BSONObj limit = a.done();
+ ASSERT( !client().findOne( ns, QUERY( "a" << limit ) ).isEmpty() );
+ client().ensureIndex( ns, BSON( "a" << 1 ) );
+ ASSERT( !client().findOne( ns, QUERY( "a" << limit ).hint( BSON( "a" << 1 ) ) ).isEmpty() );
+ }
+ };
+
+ // A server-side cursor survives destruction of the client cursor when
+ // decouple()d, and can be resumed by id via getMore.
+ class GetMore : public ClientBase {
+ public:
+ ~GetMore() {
+ client().dropCollection( "unittests.querytests.GetMore" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.GetMore";
+ insert( ns, BSON( "a" << 1 ) );
+ insert( ns, BSON( "a" << 2 ) );
+ insert( ns, BSON( "a" << 3 ) );
+ auto_ptr< DBClientCursor > cursor = client().query( ns, BSONObj(), 2 );
+ long long cursorId = cursor->getCursorId();
+ // decouple() prevents the destructor from killing the server cursor.
+ cursor->decouple();
+ cursor.reset();
+ cursor = client().getMore( ns, cursorId );
+ ASSERT( cursor->more() );
+ ASSERT_EQUALS( 3, cursor->next().getIntField( "a" ) );
+ }
+ };
+
+ // A positive nToReturn acts as a hard limit; 0 means no limit; limits above
+ // the collection size return everything.
+ class PositiveLimit : public ClientBase {
+ public:
+ const char* ns;
+ PositiveLimit() : ns("unittests.querytests.PositiveLimit") {}
+ ~PositiveLimit() {
+ client().dropCollection( ns );
+ }
+
+ // NOTE(review): helper currently unused — run() inlines the same checks.
+ void testLimit(int limit) {
+ ASSERT_EQUALS(client().query( ns, BSONObj(), limit )->itcount(), limit);
+ }
+ void run() {
+ for(int i=0; i<1000; i++)
+ insert( ns, BSON( GENOID << "i" << i ) );
+
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 1 )->itcount(), 1);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 10 )->itcount(), 10);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 101 )->itcount(), 101);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 999 )->itcount(), 999);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 1000 )->itcount(), 1000);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 1001 )->itcount(), 1000);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 0 )->itcount(), 1000);
+ }
+ };
+
+ // A tailable query with nToReturn 1 behaves like a findOne: the result is
+ // returned but no server cursor is kept open.
+ class ReturnOneOfManyAndTail : public ClientBase {
+ public:
+ ~ReturnOneOfManyAndTail() {
+ client().dropCollection( "unittests.querytests.ReturnOneOfManyAndTail" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.ReturnOneOfManyAndTail";
+ // Tailable cursors require a capped collection.
+ client().createCollection( ns, 1024, true );
+ insert( ns, BSON( "a" << 0 ) );
+ insert( ns, BSON( "a" << 1 ) );
+ insert( ns, BSON( "a" << 2 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, QUERY( "a" << GT << 0 ).hint( BSON( "$natural" << 1 ) ), 1, 0, 0, QueryOption_CursorTailable );
+ // If only one result requested, a cursor is not saved.
+ ASSERT_EQUALS( 0, c->getCursorId() );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 1, c->next().getIntField( "a" ) );
+ }
+ };
+
+ // A tailable cursor stays open after exhausting current results and picks up
+ // documents inserted later, resuming where it left off.
+ class TailNotAtEnd : public ClientBase {
+ public:
+ ~TailNotAtEnd() {
+ client().dropCollection( "unittests.querytests.TailNotAtEnd" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailNotAtEnd";
+ client().createCollection( ns, 2047, true );
+ insert( ns, BSON( "a" << 0 ) );
+ insert( ns, BSON( "a" << 1 ) );
+ insert( ns, BSON( "a" << 2 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ ASSERT( 0 != c->getCursorId() );
+ while( c->more() )
+ c->next();
+ // Cursor remains open at end of data.
+ ASSERT( 0 != c->getCursorId() );
+ insert( ns, BSON( "a" << 3 ) );
+ insert( ns, BSON( "a" << 4 ) );
+ insert( ns, BSON( "a" << 5 ) );
+ insert( ns, BSON( "a" << 6 ) );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 3, c->next().getIntField( "a" ) );
+ }
+ };
+
+ // Tailing an empty capped collection yields a dead cursor; once a document
+ // exists, even a non-matching tailable query keeps a live cursor open.
+ class EmptyTail : public ClientBase {
+ public:
+ ~EmptyTail() {
+ client().dropCollection( "unittests.querytests.EmptyTail" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.EmptyTail";
+ client().createCollection( ns, 1900, true );
+ auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ ASSERT_EQUALS( 0, c->getCursorId() );
+ ASSERT( c->isDead() );
+ insert( ns, BSON( "a" << 0 ) );
+ c = client().query( ns, QUERY( "a" << 1 ).hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ ASSERT( 0 != c->getCursorId() );
+ ASSERT( !c->isDead() );
+ }
+ };
+
+ // When a capped collection rolls over past a tailable cursor's position
+ // (max 2 docs here), the cursor is invalidated rather than skipping ahead.
+ class TailableDelete : public ClientBase {
+ public:
+ ~TailableDelete() {
+ client().dropCollection( "unittests.querytests.TailableDelete" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailableDelete";
+ // Capped collection limited to 2 documents.
+ client().createCollection( ns, 8192, true, 2 );
+ insert( ns, BSON( "a" << 0 ) );
+ insert( ns, BSON( "a" << 1 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ c->next();
+ c->next();
+ ASSERT( !c->more() );
+ // These inserts evict the documents the cursor was positioned on.
+ insert( ns, BSON( "a" << 2 ) );
+ insert( ns, BSON( "a" << 3 ) );
+ ASSERT( !c->more() );
+ ASSERT_EQUALS( 0, c->getCursorId() );
+ }
+ };
+
+ // A tailable cursor survives an insert plus a delete of an already-returned
+ // document, and returns only the newly inserted one.
+ class TailableInsertDelete : public ClientBase {
+ public:
+ ~TailableInsertDelete() {
+ client().dropCollection( "unittests.querytests.TailableInsertDelete" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailableInsertDelete";
+ client().createCollection( ns, 1330, true );
+ insert( ns, BSON( "a" << 0 ) );
+ insert( ns, BSON( "a" << 1 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ c->next();
+ c->next();
+ ASSERT( !c->more() );
+ insert( ns, BSON( "a" << 2 ) );
+ client().remove( ns, QUERY( "a" << 1 ) );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 2, c->next().getIntField( "a" ) );
+ ASSERT( !c->more() );
+ }
+ };
+
+ // Requesting a tailable cursor on an uncapped collection must fail: the
+ // cursor comes back dead and getLastError is set.
+ class TailCappedOnly : public ClientBase {
+ public:
+ ~TailCappedOnly() {
+ // Must match the namespace used in run(); was "unittest.…" (missing 's'),
+ // which silently dropped a nonexistent collection and leaked the real one.
+ client().dropCollection( "unittests.querytests.TailCappedOnly" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailCappedOnly";
+ client().insert( ns, BSONObj() );
+ auto_ptr< DBClientCursor > c = client().query( ns, BSONObj(), 0, 0, 0, QueryOption_CursorTailable );
+ ASSERT( c->isDead() );
+ ASSERT( !client().getLastError().empty() );
+ }
+ };
+
+ // Tailable cursors filtered on auto-generated OID fields must keep returning
+ // new matches after the initial results are exhausted (regression SERVER-645).
+ class TailableQueryOnId : public ClientBase {
+ public:
+ ~TailableQueryOnId() {
+ client().dropCollection( "unittests.querytests.TailableQueryOnId" );
+ }
+
+ // Inserts {_id: <new OID>, value: <new OID>, a: a}.
+ void insertA(const char* ns, int a) {
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ b.appendOID("value", 0, true);
+ b.append("a", a);
+ insert(ns, b.obj());
+ }
+
+ void run() {
+ const char *ns = "unittests.querytests.TailableQueryOnId";
+ BSONObj info;
+ client().runCommand( "unittests", BSON( "create" << "querytests.TailableQueryOnId" << "capped" << true << "size" << 8192 << "autoIndexId" << true ), info );
+ insertA( ns, 0 );
+ insertA( ns, 1 );
+ auto_ptr< DBClientCursor > c1 = client().query( ns, QUERY( "a" << GT << -1 ), 0, 0, 0, QueryOption_CursorTailable );
+ OID id;
+ id.init("000000000000000000000000");
+ auto_ptr< DBClientCursor > c2 = client().query( ns, QUERY( "value" << GT << id ), 0, 0, 0, QueryOption_CursorTailable );
+ c1->next();
+ c1->next();
+ ASSERT( !c1->more() );
+ c2->next();
+ c2->next();
+ ASSERT( !c2->more() );
+ insertA( ns, 2 );
+ ASSERT( c1->more() );
+ ASSERT_EQUALS( 2, c1->next().getIntField( "a" ) );
+ ASSERT( !c1->more() );
+ ASSERT( c2->more() );
+ ASSERT_EQUALS( 2, c2->next().getIntField( "a" ) ); // SERVER-645
+ ASSERT( !c2->more() );
+ ASSERT( !c2->isDead() );
+ }
+ };
+
+ // QueryOption_OplogReplay skips directly to the first record with ts greater
+ // than the bound, and a fresh replay query sees later inserts.
+ class OplogReplayMode : public ClientBase {
+ public:
+ ~OplogReplayMode() {
+ client().dropCollection( "unittests.querytests.OplogReplayMode" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.OplogReplayMode";
+ insert( ns, BSON( "ts" << 0 ) );
+ insert( ns, BSON( "ts" << 1 ) );
+ insert( ns, BSON( "ts" << 2 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, QUERY( "ts" << GT << 1 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 2, c->next().getIntField( "ts" ) );
+ ASSERT( !c->more() );
+
+ insert( ns, BSON( "ts" << 3 ) );
+ c = client().query( ns, QUERY( "ts" << GT << 1 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 2, c->next().getIntField( "ts" ) );
+ ASSERT( c->more() );
+ }
+ };
+
+ // count() with an indexed equality predicate tracks inserts correctly.
+ class BasicCount : public ClientBase {
+ public:
+ ~BasicCount() {
+ client().dropCollection( "unittests.querytests.BasicCount" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.BasicCount";
+ client().ensureIndex( ns, BSON( "a" << 1 ) );
+ count( 0 );
+ insert( ns, BSON( "a" << 3 ) );
+ count( 0 );
+ insert( ns, BSON( "a" << 4 ) );
+ count( 1 );
+ insert( ns, BSON( "a" << 5 ) );
+ count( 1 );
+ insert( ns, BSON( "a" << 4 ) );
+ count( 2 );
+ }
+ private:
+ // Asserts the number of documents with a:4.
+ void count( unsigned long long c ) const {
+ ASSERT_EQUALS( c, client().count( "unittests.querytests.BasicCount", BSON( "a" << 4 ) ) );
+ }
+ };
+
+ // Inserting a document whose _id is an array must fail once the _id index
+ // exists (array values cannot be _id keys).
+ class ArrayId : public ClientBase {
+ public:
+ ~ArrayId() {
+ client().dropCollection( "unittests.querytests.ArrayId" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.ArrayId";
+ client().ensureIndex( ns, BSON( "_id" << 1 ) );
+ ASSERT( !error() );
+ client().insert( ns, fromjson( "{'_id':[1,2]}" ) );
+ ASSERT( error() );
+ }
+ };
+
+ // Collections whose name begins with an underscore are usable for both
+ // insert and query without raising errors.
+ class UnderscoreNs : public ClientBase {
+ public:
+ ~UnderscoreNs() {
+ client().dropCollection( "unittests.querytests._UnderscoreNs" );
+ }
+ void run() {
+ ASSERT( !error() );
+ const char *ns = "unittests.querytests._UnderscoreNs";
+ ASSERT( client().findOne( ns, "{}" ).isEmpty() );
+ client().insert( ns, BSON( "a" << 1 ) );
+ ASSERT_EQUALS( 1, client().findOne( ns, "{}" ).getIntField( "a" ) );
+ ASSERT( !error() );
+ }
+ };
+
+ // An empty query string and an empty field-selection object are both treated
+ // as "no restriction" rather than matching nothing.
+ class EmptyFieldSpec : public ClientBase {
+ public:
+ ~EmptyFieldSpec() {
+ client().dropCollection( "unittests.querytests.EmptyFieldSpec" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.EmptyFieldSpec";
+ client().insert( ns, BSON( "a" << 1 ) );
+ ASSERT( !client().findOne( ns, "" ).isEmpty() );
+ BSONObj empty;
+ ASSERT( !client().findOne( ns, "", &empty ).isEmpty() );
+ }
+ };
+
+ // $ne against an array field must exclude a document when ANY array element
+ // equals the value; also exercises two $ne clauses on the same field.
+ class MultiNe : public ClientBase {
+ public:
+ ~MultiNe() {
+ client().dropCollection( "unittests.querytests.Ne" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.Ne";
+ client().insert( ns, fromjson( "{a:[1,2]}" ) );
+ ASSERT( client().findOne( ns, fromjson( "{a:{$ne:1}}" ) ).isEmpty() );
+ // Duplicate $ne keys in one object — relies on fromjson accepting
+ // repeated field names.
+ BSONObj spec = fromjson( "{a:{$ne:1,$ne:2}}" );
+ ASSERT( client().findOne( ns, spec ).isEmpty() );
+ }
+ };
+
+ // $ne on a dotted path must exclude a document when any array element's
+ // subfield equals the value.
+ class EmbeddedNe : public ClientBase {
+ public:
+ ~EmbeddedNe() {
+ client().dropCollection( "unittests.querytests.NestedNe" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.NestedNe";
+ client().insert( ns, fromjson( "{a:[{b:1},{b:2}]}" ) );
+ ASSERT( client().findOne( ns, fromjson( "{'a.b':{$ne:1}}" ) ).isEmpty() );
+ }
+ };
+
+ // An embedded int value must match a query with the equivalent double
+ // (numeric cross-type equality), both unindexed and via an index.
+ class EmbeddedNumericTypes : public ClientBase {
+ public:
+ ~EmbeddedNumericTypes() {
+ client().dropCollection( "unittests.querytests.NumericEmbedded" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.NumericEmbedded";
+ client().insert( ns, BSON( "a" << BSON ( "b" << 1 ) ) );
+ ASSERT( ! client().findOne( ns, BSON( "a" << BSON ( "b" << 1.0 ) ) ).isEmpty() );
+ client().ensureIndex( ns , BSON( "a" << 1 ) );
+ ASSERT( ! client().findOne( ns, BSON( "a" << BSON ( "b" << 1.0 ) ) ).isEmpty() );
+ }
+ };
+
+ // ensureIndex's client-side "already created" cache must be invalidated when
+ // the collection or database is dropped, so the index is actually recreated.
+ class AutoResetIndexCache : public ClientBase {
+ public:
+ ~AutoResetIndexCache() {
+ client().dropCollection( "unittests.querytests.AutoResetIndexCache" );
+ }
+ static const char *ns() { return "unittests.querytests.AutoResetIndexCache"; }
+ static const char *idxNs() { return "unittests.system.indexes"; }
+ // Asserts a non-_id index exists.
+ void index() const { ASSERT( !client().findOne( idxNs(), BSON( "name" << NE << "_id_" ) ).isEmpty() ); }
+ // Asserts no non-_id index exists, dumping the stray spec on failure.
+ void noIndex() const {
+ BSONObj o = client().findOne( idxNs(), BSON( "name" << NE << "_id_" ) );
+ if( !o.isEmpty() ) {
+ cout << o.toString() << endl;
+ ASSERT( false );
+ }
+ }
+ void checkIndex() {
+ client().ensureIndex( ns(), BSON( "a" << 1 ) );
+ index();
+ }
+ void run() {
+ client().dropDatabase( "unittests" );
+ noIndex();
+ checkIndex();
+ client().dropCollection( ns() );
+ noIndex();
+ checkIndex();
+ client().dropDatabase( "unittests" );
+ noIndex();
+ checkIndex();
+ }
+ };
+
+    // A unique index rejects a second document with a duplicate key for the
+    // indexed field, but allows it when the duplicate is on a non-indexed field.
+    class UniqueIndex : public ClientBase {
+    public:
+        ~UniqueIndex() {
+            client().dropCollection( "unittests.querytests.UniqueIndex" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.UniqueIndex";
+            // Unique on "a": second insert duplicates a:4 and is dropped.
+            client().ensureIndex( ns, BSON( "a" << 1 ), true );
+            client().insert( ns, BSON( "a" << 4 << "b" << 2 ) );
+            client().insert( ns, BSON( "a" << 4 << "b" << 3 ) );
+            ASSERT_EQUALS( 1U, client().count( ns, BSONObj() ) );
+            client().dropCollection( ns );
+            // Unique on "b": both docs have distinct b values, so both insert.
+            client().ensureIndex( ns, BSON( "b" << 1 ), true );
+            client().insert( ns, BSON( "a" << 4 << "b" << 2 ) );
+            client().insert( ns, BSON( "a" << 4 << "b" << 3 ) );
+            ASSERT_EQUALS( 2U, client().count( ns, BSONObj() ) );
+        }
+    };
+
+    // Building a unique index over data that already contains duplicate keys
+    // must fail: afterwards no non-_id index on the collection may exist.
+    class UniqueIndexPreexistingData : public ClientBase {
+    public:
+        ~UniqueIndexPreexistingData() {
+            client().dropCollection( "unittests.querytests.UniqueIndexPreexistingData" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.UniqueIndexPreexistingData";
+            client().insert( ns, BSON( "a" << 4 << "b" << 2 ) );
+            client().insert( ns, BSON( "a" << 4 << "b" << 3 ) );
+            client().ensureIndex( ns, BSON( "a" << 1 ), true );
+            ASSERT_EQUALS( 0U, client().count( "unittests.system.indexes", BSON( "ns" << ns << "name" << NE << "_id_" ) ) );
+        }
+    };
+
+    // Dotted-path matching reaches through an array of subobjects, and a
+    // missing path matches a null query.
+    class SubobjectInArray : public ClientBase {
+    public:
+        ~SubobjectInArray() {
+            client().dropCollection( "unittests.querytests.SubobjectInArray" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.SubobjectInArray";
+            client().insert( ns, fromjson( "{a:[{b:{c:1}}]}" ) );
+            ASSERT( !client().findOne( ns, BSON( "a.b.c" << 1 ) ).isEmpty() );
+            // 'a.c' does not exist in the doc; {$eq:null} semantics match it.
+            ASSERT( !client().findOne( ns, fromjson( "{'a.c':null}" ) ).isEmpty() );
+        }
+    };
+
+    // $size matches an array's element count even when the query is forced
+    // (via hint) through an index on the array field.
+    class Size : public ClientBase {
+    public:
+        ~Size() {
+            client().dropCollection( "unittests.querytests.Size" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.Size";
+            client().insert( ns, fromjson( "{a:[1,2,3]}" ) );
+            client().ensureIndex( ns, BSON( "a" << 1 ) );
+            ASSERT( client().query( ns, QUERY( "a" << mongo::SIZE << 3 ).hint( BSON( "a" << 1 ) ) )->more() );
+        }
+    };
+
+    // An exact whole-array equality query matches with and without an index on
+    // the array field, and via $in containing the whole array (see SERVER-146).
+    class FullArray : public ClientBase {
+    public:
+        ~FullArray() {
+            client().dropCollection( "unittests.querytests.IndexedArray" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.IndexedArray";
+            client().insert( ns, fromjson( "{a:[1,2,3]}" ) );
+            ASSERT( client().query( ns, Query( "{a:[1,2,3]}" ) )->more() );
+            client().ensureIndex( ns, BSON( "a" << 1 ) );
+            ASSERT( client().query( ns, Query( "{a:{$in:[1,[1,2,3]]}}" ).hint( BSON( "a" << 1 ) ) )->more() );
+            ASSERT( client().query( ns, Query( "{a:[1,2,3]}" ).hint( BSON( "a" << 1 ) ) )->more() ); // SERVER-146
+        }
+    };
+
+    // Matching semantics for a doc {a:[[1],2]}: the nested array [1] and the
+    // scalar 2 match as elements, but the inner scalar 1 does not. Verified
+    // with a collection scan and with an index on "a" (SERVER-146).
+    class InsideArray : public ClientBase {
+    public:
+        ~InsideArray() {
+            client().dropCollection( "unittests.querytests.InsideArray" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.InsideArray";
+            client().insert( ns, fromjson( "{a:[[1],2]}" ) );
+            check( "$natural" );
+            client().ensureIndex( ns, BSON( "a" << 1 ) );
+            check( "a" ); // SERVER-146
+        }
+    private:
+        // Run the four match/no-match cases forcing the given access path.
+        void check( const string &hintField ) {
+            const char *ns = "unittests.querytests.InsideArray";
+            ASSERT( client().query( ns, Query( "{a:[[1],2]}" ).hint( BSON( hintField << 1 ) ) )->more() );
+            ASSERT( client().query( ns, Query( "{a:[1]}" ).hint( BSON( hintField << 1 ) ) )->more() );
+            ASSERT( client().query( ns, Query( "{a:2}" ).hint( BSON( hintField << 1 ) ) )->more() );
+            ASSERT( !client().query( ns, Query( "{a:1}" ).hint( BSON( hintField << 1 ) ) )->more() );
+        }
+    };
+
+    // An indexed query for {a:[1]} must return the doc whose array contains
+    // the scalar 1 (_id:1), not the doc containing the nested array (_id:2).
+    class IndexInsideArrayCorrect : public ClientBase {
+    public:
+        ~IndexInsideArrayCorrect() {
+            client().dropCollection( "unittests.querytests.IndexInsideArrayCorrect" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.IndexInsideArrayCorrect";
+            client().insert( ns, fromjson( "{'_id':1,a:[1]}" ) );
+            client().insert( ns, fromjson( "{'_id':2,a:[[1]]}" ) );
+            client().ensureIndex( ns, BSON( "a" << 1 ) );
+            ASSERT_EQUALS( 1, client().query( ns, Query( "{a:[1]}" ).hint( BSON( "a" << 1 ) ) )->next().getIntField( "_id" ) );
+        }
+    };
+
+    // Dotted path into an array nested inside an array of subobjects:
+    // {a:[{b:[1]}]} must match both 'a.b':1 and 'a.b':[1], scanned and indexed.
+    class SubobjArr : public ClientBase {
+    public:
+        ~SubobjArr() {
+            client().dropCollection( "unittests.querytests.SubobjArr" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.SubobjArr";
+            client().insert( ns, fromjson( "{a:[{b:[1]}]}" ) );
+            check( "$natural" );
+            client().ensureIndex( ns, BSON( "a" << 1 ) );
+            check( "a" );
+        }
+    private:
+        void check( const string &hintField ) {
+            const char *ns = "unittests.querytests.SubobjArr";
+            ASSERT( client().query( ns, Query( "{'a.b':1}" ).hint( BSON( hintField << 1 ) ) )->more() );
+            ASSERT( client().query( ns, Query( "{'a.b':[1]}" ).hint( BSON( hintField << 1 ) ) )->more() );
+        }
+    };
+
+    // $min/$max bounded queries over a compound {a,b} index: counts and
+    // ordering of results, with and without an explicit hint.
+    class MinMax : public ClientBase {
+    public:
+        MinMax() : ns( "unittests.querytests.MinMax" ) {}
+        ~MinMax() {
+            client().dropCollection( "unittests.querytests.MinMax" );
+        }
+        void run() {
+            client().ensureIndex( ns, BSON( "a" << 1 << "b" << 1 ) );
+            // All four (a,b) combinations of {1,2}x{1,2}.
+            client().insert( ns, BSON( "a" << 1 << "b" << 1 ) );
+            client().insert( ns, BSON( "a" << 1 << "b" << 2 ) );
+            client().insert( ns, BSON( "a" << 2 << "b" << 1 ) );
+            client().insert( ns, BSON( "a" << 2 << "b" << 2 ) );
+
+            ASSERT_EQUALS( 4, count( client().query( ns, BSONObj() ) ) );
+            BSONObj hints[] = { BSONObj(), BSON( "a" << 1 << "b" << 1 ) };
+            for( int i = 0; i < 2; ++i ) {
+                // (minA,minB) inclusive lower bound, (maxA,maxB) exclusive
+                // upper bound, expected result count.
+                check( 0, 0, 3, 3, 4, hints[ i ] );
+                check( 1, 1, 2, 2, 3, hints[ i ] );
+                check( 1, 2, 2, 2, 2, hints[ i ] );
+                check( 1, 2, 2, 1, 1, hints[ i ] );
+
+                // Results come back in index order: (1,2) then (2,1).
+                auto_ptr< DBClientCursor > c = query( 1, 2, 2, 2, hints[ i ] );
+                BSONObj obj = c->next();
+                ASSERT_EQUALS( 1, obj.getIntField( "a" ) );
+                ASSERT_EQUALS( 2, obj.getIntField( "b" ) );
+                obj = c->next();
+                ASSERT_EQUALS( 2, obj.getIntField( "a" ) );
+                ASSERT_EQUALS( 1, obj.getIntField( "b" ) );
+                ASSERT( !c->more() );
+            }
+        }
+    private:
+        // Issue a minKey/maxKey bounded query; hint applied only if non-empty.
+        auto_ptr< DBClientCursor > query( int minA, int minB, int maxA, int maxB, const BSONObj &hint ) {
+            Query q;
+            q = q.minKey( BSON( "a" << minA << "b" << minB ) ).maxKey( BSON( "a" << maxA << "b" << maxB ) );
+            if ( !hint.isEmpty() )
+                q.hint( hint ); // NOTE(review): assumes hint() mutates q in place — confirm in driver
+            return client().query( ns, q );
+        }
+        void check( int minA, int minB, int maxA, int maxB, int expectedCount, const BSONObj &hint = empty_ ) {
+            ASSERT_EQUALS( expectedCount, count( query( minA, minB, maxA, maxB, hint ) ) );
+        }
+        // Drain the cursor and return how many documents it yielded.
+        int count( auto_ptr< DBClientCursor > c ) {
+            int ret = 0;
+            while( c->more() ) {
+                ++ret;
+                c->next();
+            }
+            return ret;
+        }
+        const char *ns;
+        static BSONObj empty_;
+    };
+    BSONObj MinMax::empty_;
+
+    // Code vs CodeWScope are distinct BSON types: equality and $type queries
+    // must not cross-match, and index bounds for each $type must be tight.
+    class MatchCodeCodeWScope : public ClientBase {
+    public:
+        MatchCodeCodeWScope() : _ns( "unittests.querytests.MatchCodeCodeWScope" ) {}
+        ~MatchCodeCodeWScope() {
+            client().dropCollection( "unittests.querytests.MatchCodeCodeWScope" );
+        }
+        void run() {
+            checkMatch();
+            client().ensureIndex( _ns, BSON( "a" << 1 ) );
+            checkMatch();
+            // Use explain queries to check index bounds.
+            {
+                // $type:Code scans [emptyCode, emptyCodeWScope).
+                BSONObj explain = client().findOne( _ns, QUERY( "a" << BSON( "$type" << (int)Code ) ).explain() );
+                BSONObjBuilder lower;
+                lower.appendCode( "", "" );
+                BSONObjBuilder upper;
+                upper.appendCodeWScope( "", "", BSONObj() );
+                ASSERT( lower.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 0 ] ) );
+                ASSERT( upper.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 1 ] ) );
+            }
+            {
+                // $type:CodeWScope scans [emptyCodeWScope, $maxElement).
+                BSONObj explain = client().findOne( _ns, QUERY( "a" << BSON( "$type" << (int)CodeWScope ) ).explain() );
+                BSONObjBuilder lower;
+                lower.appendCodeWScope( "", "", BSONObj() );
+                // This upper bound may change if a new bson type is added.
+                BSONObjBuilder upper;
+                upper << "" << BSON( "$maxElement" << 1 );
+                ASSERT( lower.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 0 ] ) );
+                ASSERT( upper.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 1 ] ) );
+            }
+        }
+    private:
+        // One Code doc and one CodeWScope doc; each query matches exactly one.
+        void checkMatch() {
+            client().remove( _ns, BSONObj() );
+
+            client().insert( _ns, code() );
+            client().insert( _ns, codeWScope() );
+
+            ASSERT_EQUALS( 1U, client().count( _ns, code() ) );
+            ASSERT_EQUALS( 1U, client().count( _ns, codeWScope() ) );
+
+            ASSERT_EQUALS( 1U, client().count( _ns, BSON( "a" << BSON( "$type" << (int)Code ) ) ) );
+            ASSERT_EQUALS( 1U, client().count( _ns, BSON( "a" << BSON( "$type" << (int)CodeWScope ) ) ) );
+        }
+        BSONObj code() const {
+            BSONObjBuilder codeBuilder;
+            codeBuilder.appendCode( "a", "return 1;" );
+            return codeBuilder.obj();
+        }
+        BSONObj codeWScope() const {
+            BSONObjBuilder codeWScopeBuilder;
+            codeWScopeBuilder.appendCodeWScope( "a", "return 1;", BSONObj() );
+            return codeWScopeBuilder.obj();
+        }
+        const char *_ns;
+    };
+
+    // A DBRef-typed field matches both an exact-value query and a $type:DBRef
+    // query, unindexed and indexed.
+    class MatchDBRefType : public ClientBase {
+    public:
+        MatchDBRefType() : _ns( "unittests.querytests.MatchDBRefType" ) {}
+        ~MatchDBRefType() {
+            client().dropCollection( "unittests.querytests.MatchDBRefType" );
+        }
+        void run() {
+            checkMatch();
+            client().ensureIndex( _ns, BSON( "a" << 1 ) );
+            checkMatch();
+        }
+    private:
+        void checkMatch() {
+            client().remove( _ns, BSONObj() );
+            client().insert( _ns, dbref() );
+            ASSERT_EQUALS( 1U, client().count( _ns, dbref() ) );
+            ASSERT_EQUALS( 1U, client().count( _ns, BSON( "a" << BSON( "$type" << (int)DBRef ) ) ) );
+        }
+        // Builds {a: DBRef("ns", <default OID>)}.
+        BSONObj dbref() const {
+            BSONObjBuilder b;
+            OID oid;
+            b.appendDBRef( "a", "ns", oid );
+            return b.obj();
+        }
+        const char *_ns;
+    };
+
+    // Holding the global write lock, entering a context and touching another
+    // database must leave the thread's current database unchanged.
+    class DirectLocking : public ClientBase {
+    public:
+        void run() {
+            dblock lk;
+            Client::Context ctx( "unittests.DirectLocking" );
+            client().remove( "a.b", BSONObj() );
+            ASSERT_EQUALS( "unittests", cc().database()->name );
+        }
+        const char *ns; // NOTE(review): never assigned or read — appears vestigial
+    };
+
+    // count() with a single-element $in over an indexed field returns the
+    // correct result.
+    class FastCountIn : public ClientBase {
+    public:
+        ~FastCountIn() {
+            client().dropCollection( "unittests.querytests.FastCountIn" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.FastCountIn";
+            client().insert( ns, BSON( "i" << "a" ) );
+            client().ensureIndex( ns, BSON( "i" << 1 ) );
+            ASSERT_EQUALS( 1U, client().count( ns, fromjson( "{i:{$in:['a']}}" ) ) );
+        }
+    };
+
+    // Scalar equality against an array field matches any element, both at the
+    // top level ("bar") and through a dotted path ("foo.bar").
+    class EmbeddedArray : public ClientBase {
+    public:
+        ~EmbeddedArray() {
+            client().dropCollection( "unittests.querytests.EmbeddedArray" );
+        }
+        void run() {
+            const char *ns = "unittests.querytests.EmbeddedArray";
+            client().insert( ns, fromjson( "{foo:{bar:['spam']}}" ) );
+            client().insert( ns, fromjson( "{foo:{bar:['spam','eggs']}}" ) );
+            client().insert( ns, fromjson( "{bar:['spam']}" ) );
+            client().insert( ns, fromjson( "{bar:['spam','eggs']}" ) );
+            ASSERT_EQUALS( 2U, client().count( ns, BSON( "bar" << "spam" ) ) );
+            ASSERT_EQUALS( 2U, client().count( ns, BSON( "foo.bar" << "spam" ) ) );
+        }
+    };
+
+    // Sorting a field that holds mixed types (int, long long, null, string,
+    // double, missing) must yield valid documents — with and without an index.
+    class DifferentNumbers : public ClientBase {
+    public:
+        ~DifferentNumbers() {
+            client().dropCollection( "unittests.querytests.DifferentNumbers" );
+        }
+        // Iterate the collection sorted by field "7", validating each doc.
+        void t( const char * ns ) {
+            auto_ptr< DBClientCursor > cursor = client().query( ns, Query().sort( "7" ) );
+            while ( cursor->more() ) {
+                BSONObj o = cursor->next();
+                assert( o.valid() );
+                //cout << " foo " << o << endl;
+            }
+
+        }
+        void run() {
+            const char *ns = "unittests.querytests.DifferentNumbers";
+            // One doc per BSON type variant of field "7" (plus one without it).
+            { BSONObjBuilder b; b.append( "7" , (int)4 ); client().insert( ns , b.obj() ); }
+            { BSONObjBuilder b; b.append( "7" , (long long)2 ); client().insert( ns , b.obj() ); }
+            { BSONObjBuilder b; b.appendNull( "7" ); client().insert( ns , b.obj() ); }
+            { BSONObjBuilder b; b.append( "7" , "b" ); client().insert( ns , b.obj() ); }
+            { BSONObjBuilder b; b.appendNull( "8" ); client().insert( ns , b.obj() ); }
+            { BSONObjBuilder b; b.append( "7" , (double)3.7 ); client().insert( ns , b.obj() ); }
+
+            t(ns);
+            client().ensureIndex( ns , BSON( "7" << 1 ) );
+            t(ns);
+        }
+    };
+
+    // Test fixture owning a collection "unittests.querytests.<leaf>": drops it
+    // on construction and destruction, and exposes ns()/count() helpers.
+    class CollectionBase : public ClientBase {
+    public:
+
+        CollectionBase( string leaf ) {
+            _ns = "unittests.querytests.";
+            _ns += leaf;
+            client().dropCollection( ns() );
+        }
+
+        virtual ~CollectionBase() {
+            client().dropCollection( ns() );
+        }
+
+        // Total document count of the fixture collection.
+        int count() {
+            return (int) client().count( ns() );
+        }
+
+        const char * ns() {
+            return _ns.c_str();
+        }
+
+    private:
+        string _ns;
+    };
+
+    // A Symbol-typed value and a String query value compare equal, whether
+    // the query uses a symbol, a string, or an indexed string.
+    class SymbolStringSame : public CollectionBase {
+    public:
+        SymbolStringSame() : CollectionBase( "symbolstringsame" ) {}
+
+        void run() {
+            { BSONObjBuilder b; b.appendSymbol( "x" , "eliot" ); b.append( "z" , 17 ); client().insert( ns() , b.obj() ); }
+            ASSERT_EQUALS( 17 , client().findOne( ns() , BSONObj() )["z"].number() );
+            {
+                // Query with a symbol key value.
+                BSONObjBuilder b;
+                b.appendSymbol( "x" , "eliot" );
+                ASSERT_EQUALS( 17 , client().findOne( ns() , b.obj() )["z"].number() );
+            }
+            // Query with a plain string, unindexed then indexed.
+            ASSERT_EQUALS( 17 , client().findOne( ns() , BSON( "x" << "eliot" ) )["z"].number() );
+            client().ensureIndex( ns() , BSON( "x" << 1 ) );
+            ASSERT_EQUALS( 17 , client().findOne( ns() , BSON( "x" << "eliot" ) )["z"].number() );
+        }
+    };
+
+    // Tailable cursor over a small capped collection: the cursor sees new
+    // inserts after draining, and dies once capped wrap-around overwrites
+    // its position.
+    class TailableCappedRaceCondition : public CollectionBase {
+    public:
+
+        TailableCappedRaceCondition() : CollectionBase( "tailablecappedrace" ) {
+            client().dropCollection( ns() );
+            _n = 0;
+        }
+        void run() {
+            string err;
+
+            writelock lk("");
+            Client::Context ctx( "unittests" );
+
+            // note that extents are always at least 4KB now - so this will get rounded up a bit.
+            ASSERT( userCreateNS( ns() , fromjson( "{ capped : true , size : 2000 }" ) , err , false ) );
+            // The cap keeps the collection well under 90 docs at all times.
+            for ( int i=0; i<200; i++ ) {
+                insertNext();
+//                cout << count() << endl;
+                ASSERT( count() < 90 );
+            }
+
+            int a = count();
+
+            // Tailable scan returns exactly the current contents...
+            auto_ptr< DBClientCursor > c = client().query( ns() , QUERY( "i" << GT << 0 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_CursorTailable );
+            int n=0;
+            while ( c->more() ) {
+                BSONObj z = c->next();
+                n++;
+            }
+
+            ASSERT_EQUALS( a , n );
+
+            // ...then picks up a subsequent insert...
+            insertNext();
+            ASSERT( c->more() );
+
+            // ...and dies after enough inserts to wrap the capped collection.
+            for ( int i=0; i<90; i++ ) {
+                insertNext();
+            }
+
+            while ( c->more() ) { c->next(); }
+            ASSERT( c->isDead() );
+        }
+
+        // Insert {_id: <new OID>, i: <monotonic counter>}.
+        void insertNext() {
+            BSONObjBuilder b;
+            b.appendOID("_id", 0, true);
+            b.append("i", _n++);
+            insert( ns() , b.obj() );
+        }
+
+        int _n;
+    };
+
+    // Helpers::findOne/findById correctness, plus a rough timing comparison of
+    // the two lookup paths (findById is expected to be the fast path).
+    class HelperTest : public CollectionBase {
+    public:
+
+        HelperTest() : CollectionBase( "helpertest" ) {
+        }
+
+        void run() {
+            writelock lk("");
+            Client::Context ctx( "unittests" );
+
+            for ( int i=0; i<50; i++ ) {
+                insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
+            }
+
+            ASSERT_EQUALS( 50 , count() );
+
+            BSONObj res;
+            ASSERT( Helpers::findOne( ns() , BSON( "_id" << 20 ) , res , true ) );
+            ASSERT_EQUALS( 40 , res["x"].numberInt() );
+
+            ASSERT( Helpers::findById( cc(), ns() , BSON( "_id" << 20 ) , res ) );
+            ASSERT_EQUALS( 40 , res["x"].numberInt() );
+
+            // Missing _id must be reported as not found.
+            ASSERT( ! Helpers::findById( cc(), ns() , BSON( "_id" << 200 ) , res ) );
+
+            unsigned long long slow , fast;
+
+            // Fewer iterations for debug builds.
+            int n = 10000;
+            DEV n = 1000;
+            {
+                Timer t;
+                for ( int i=0; i<n; i++ ) {
+                    ASSERT( Helpers::findOne( ns() , BSON( "_id" << 20 ) , res , true ) );
+                }
+                slow = t.micros();
+            }
+            {
+                Timer t;
+                for ( int i=0; i<n; i++ ) {
+                    ASSERT( Helpers::findById( cc(), ns() , BSON( "_id" << 20 ) , res ) );
+                }
+                fast = t.micros();
+            }
+
+            // Timings are informational only; no assertion on their relation.
+            cout << "HelperTest slow:" << slow << " fast:" << fast << endl;
+
+        }
+    };
+
+    // Helpers::findById after deleting every even _id: found() must be true
+    // exactly for the odd ids that remain.
+    class HelperByIdTest : public CollectionBase {
+    public:
+
+        HelperByIdTest() : CollectionBase( "helpertestbyid" ) {
+        }
+
+        void run() {
+            writelock lk("");
+            Client::Context ctx( "unittests" );
+
+            for ( int i=0; i<1000; i++ ) {
+                insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
+            }
+            // Remove all even ids.
+            for ( int i=0; i<1000; i+=2 ) {
+                client_.remove( ns() , BSON( "_id" << i ) );
+            }
+
+            BSONObj res;
+            for ( int i=0; i<1000; i++ ) {
+                bool found = Helpers::findById( cc(), ns() , BSON( "_id" << i ) , res );
+                // Odd ids (i % 2 == 1) were kept, even ids removed.
+                ASSERT_EQUALS( i % 2 , int(found) );
+            }
+
+        }
+    };
+
+    // Populates a collection for client-cursor exercising. NOTE(review): the
+    // run() body only inserts documents — no cursor assertions were ever
+    // added, and the class is not registered in the "query" suite below.
+    class ClientCursorTest : public CollectionBase {
+    public:
+        // Members must be public so the test framework can construct the
+        // fixture and invoke run(); previously everything here was private
+        // (class default access), making the test unusable.
+        ClientCursorTest() : CollectionBase( "clientcursortest" ) {
+        }
+
+        void run() {
+            writelock lk("");
+            Client::Context ctx( "unittests" );
+
+            for ( int i=0; i<1000; i++ ) {
+                insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
+            }
+
+
+        }
+    };
+
+    // QueryOption_OplogReplay over a capped collection: for every query bound
+    // the first returned "ts" is max(bound, oldest surviving ts). Disables the
+    // findingStart initial timeout for the duration of the test.
+    class FindingStart : public CollectionBase {
+    public:
+        FindingStart() : CollectionBase( "findingstart" ), _old( __findingStartInitialTimeout ) {
+            __findingStartInitialTimeout = 0;
+        }
+        ~FindingStart() {
+            // Restore the saved timeout.
+            __findingStartInitialTimeout = _old;
+        }
+
+        void run() {
+            BSONObj info;
+            ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+
+            // Fill until the capped collection stops growing (count stabilizes).
+            int i = 0;
+            for( int oldCount = -1;
+                    count() != oldCount;
+                    oldCount = count(), client().insert( ns(), BSON( "ts" << i++ ) ) );
+
+            for( int k = 0; k < 5; ++k ) {
+                client().insert( ns(), BSON( "ts" << i++ ) );
+                // Oldest surviving ts value, via a forward natural-order scan.
+                int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
+                for( int j = -1; j < i; ++j ) {
+                    auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
+                    ASSERT( c->more() );
+                    BSONObj next = c->next();
+                    ASSERT( !next[ "ts" ].eoo() );
+                    ASSERT_EQUALS( ( j > min ? j : min ), next[ "ts" ].numberInt() );
+                }
+                //cout << k << endl;
+            }
+        }
+
+    private:
+        int _old;
+    };
+
+    // Same OplogReplay scan as FindingStart, but against a capped collection
+    // that is only partially full; also checks that no server-side cursors
+    // are leaked by the queries.
+    class FindingStartPartiallyFull : public CollectionBase {
+    public:
+        FindingStartPartiallyFull() : CollectionBase( "findingstart" ), _old( __findingStartInitialTimeout ) {
+            __findingStartInitialTimeout = 0;
+        }
+        ~FindingStartPartiallyFull() {
+            __findingStartInitialTimeout = _old;
+        }
+
+        void run() {
+            unsigned startNumCursors = ClientCursor::numCursors();
+
+            BSONObj info;
+            ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+
+            // Insert a fixed number of docs (collection not yet wrapped).
+            int i = 0;
+            for( ; i < 150; client().insert( ns(), BSON( "ts" << i++ ) ) );
+
+            for( int k = 0; k < 5; ++k ) {
+                client().insert( ns(), BSON( "ts" << i++ ) );
+                int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
+                for( int j = -1; j < i; ++j ) {
+                    auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
+                    ASSERT( c->more() );
+                    BSONObj next = c->next();
+                    ASSERT( !next[ "ts" ].eoo() );
+                    ASSERT_EQUALS( ( j > min ? j : min ), next[ "ts" ].numberInt() );
+                }
+            }
+
+            // No persistent cursors may outlast the queries above.
+            ASSERT_EQUALS( startNumCursors, ClientCursor::numCursors() );
+        }
+
+    private:
+        int _old;
+    };
+
+ /**
+ * Check OplogReplay mode where query timestamp is earlier than the earliest
+ * entry in the collection.
+ */
+    class FindingStartStale : public CollectionBase {
+    public:
+        FindingStartStale() : CollectionBase( "findingstart" ) {}
+
+        void run() {
+            unsigned startNumCursors = ClientCursor::numCursors();
+
+            BSONObj info;
+            ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+
+            // Check OplogReplay mode with empty collection.
+            auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << 50 ), 0, 0, 0, QueryOption_OplogReplay );
+            ASSERT( !c->more() );
+
+            // Check with some docs in the collection: all docs have ts >= 100,
+            // so a stale bound (50) starts from the earliest entry.
+            for( int i = 100; i < 150; client().insert( ns(), BSON( "ts" << i++ ) ) );
+            c = client().query( ns(), QUERY( "ts" << GTE << 50 ), 0, 0, 0, QueryOption_OplogReplay );
+            ASSERT( c->more() );
+            ASSERT_EQUALS( 100, c->next()[ "ts" ].numberInt() );
+
+            // Check that no persistent cursors outlast our queries above.
+            ASSERT_EQUALS( startNumCursors, ClientCursor::numCursors() );
+        }
+    };
+
+    // The whatsmyuri admin command reports the connecting client's address;
+    // for this in-process client that is the "unknown address" sentinel.
+    class WhatsMyUri : public CollectionBase {
+    public:
+        WhatsMyUri() : CollectionBase( "whatsmyuri" ) {}
+        void run() {
+            BSONObj result;
+            client().runCommand( "admin", BSON( "whatsmyuri" << 1 ), result );
+            ASSERT_EQUALS( unknownAddress.toString(), result[ "you" ].str() );
+        }
+    };
+
+    // ParsedQuery tests: filter extraction and numToReturn semantics.
+    namespace parsedtests {
+        class basic1 {
+        public:
+            // The filter must be extracted whether given bare, or wrapped in
+            // "query" or "$query".
+            void _test( const BSONObj& in ) {
+                ParsedQuery q( "a.b" , 5 , 6 , 9 , in , BSONObj() );
+                ASSERT_EQUALS( BSON( "x" << 5 ) , q.getFilter() );
+            }
+            void run() {
+                _test( BSON( "x" << 5 ) );
+                _test( BSON( "query" << BSON( "x" << 5 ) ) );
+                _test( BSON( "$query" << BSON( "x" << 5 ) ) );
+
+                {
+                    // Positive ntoreturn: limit 6, more batches allowed.
+                    ParsedQuery q( "a.b" , 5 , 6 , 9 , BSON( "x" << 5 ) , BSONObj() );
+                    ASSERT_EQUALS( 6 , q.getNumToReturn() );
+                    ASSERT( q.wantMore() );
+                }
+                {
+                    // Negative ntoreturn: absolute value, single batch only.
+                    ParsedQuery q( "a.b" , 5 , -6 , 9 , BSON( "x" << 5 ) , BSONObj() );
+                    ASSERT_EQUALS( 6 , q.getNumToReturn() );
+                    ASSERT( ! q.wantMore() );
+                }
+            }
+        };
+    };
+
+    // Query object tests: getFilter() unwraps both "query" and "$query".
+    namespace queryobjecttests {
+        class names1 {
+        public:
+            void run() {
+                ASSERT_EQUALS( BSON( "x" << 1 ) , QUERY( "query" << BSON( "x" << 1 ) ).getFilter() );
+                ASSERT_EQUALS( BSON( "x" << 1 ) , QUERY( "$query" << BSON( "x" << 1 ) ).getFilter() );
+            }
+
+        };
+    }
+
+    // Ordering::make packs each key's direction; get(i) returns +/-1 and
+    // descending() is tested with a one-hot bitmask per key position.
+    class OrderingTest {
+    public:
+        void run() {
+            {
+                Ordering o = Ordering::make( BSON( "a" << 1 << "b" << -1 << "c" << 1 ) );
+                ASSERT_EQUALS( 1 , o.get(0) );
+                ASSERT_EQUALS( -1 , o.get(1) );
+                ASSERT_EQUALS( 1 , o.get(2) );
+
+                // Bit i set iff key i is descending.
+                ASSERT( ! o.descending( 1 ) );
+                ASSERT( o.descending( 1 << 1 ) );
+                ASSERT( ! o.descending( 1 << 2 ) );
+            }
+
+            {
+                // Dotted field names behave the same as plain ones.
+                Ordering o = Ordering::make( BSON( "a.d" << 1 << "a" << 1 << "e" << -1 ) );
+                ASSERT_EQUALS( 1 , o.get(0) );
+                ASSERT_EQUALS( 1 , o.get(1) );
+                ASSERT_EQUALS( -1 , o.get(2) );
+
+                ASSERT( ! o.descending( 1 ) );
+                ASSERT( ! o.descending( 1 << 1 ) );
+                ASSERT( o.descending( 1 << 2 ) );
+            }
+
+        }
+    };
+
+    namespace proj { // Projection tests
+
+        // transform() keeps only the projected fields of a document.
+        class T1 {
+        public:
+            void run() {
+
+                Projection m;
+                m.init( BSON( "a" << 1 ) );
+                ASSERT_EQUALS( BSON( "a" << 5 ) , m.transform( BSON( "x" << 1 << "a" << 5 ) ) );
+            }
+        };
+
+        // checkKey(): covered-projection support from an index key pattern.
+        // Returns a KeyOnly extractor when the key covers the projection,
+        // null otherwise; hydrate() rebuilds the doc from raw key values.
+        class K1 {
+        public:
+            void run() {
+
+                Projection m;
+                m.init( BSON( "a" << 1 ) );
+
+                // Key {a} lacks _id (included by default) — not covered.
+                scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 ) ) );
+                ASSERT( ! x );
+
+                x.reset( m.checkKey( BSON( "a" << 1 << "_id" << 1 ) ) );
+                ASSERT( x );
+
+                ASSERT_EQUALS( BSON( "a" << 5 << "_id" << 17 ) ,
+                               x->hydrate( BSON( "" << 5 << "" << 17 ) ) );
+
+                // Extra key fields ("x") are simply skipped by hydrate().
+                x.reset( m.checkKey( BSON( "a" << 1 << "x" << 1 << "_id" << 1 ) ) );
+                ASSERT( x );
+
+                ASSERT_EQUALS( BSON( "a" << 5 << "_id" << 17 ) ,
+                               x->hydrate( BSON( "" << 5 << "" << 123 << "" << 17 ) ) );
+
+            }
+        };
+
+        // With _id excluded, a key containing just "a" covers the projection.
+        class K2 {
+        public:
+            void run() {
+
+                Projection m;
+                m.init( BSON( "a" << 1 << "_id" << 0 ) );
+
+                scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 ) ) );
+                ASSERT( x );
+
+                ASSERT_EQUALS( BSON( "a" << 17 ) ,
+                               x->hydrate( BSON( "" << 17 ) ) );
+
+                x.reset( m.checkKey( BSON( "x" << 1 << "a" << 1 << "_id" << 1 ) ) );
+                ASSERT( x );
+
+                ASSERT_EQUALS( BSON( "a" << 123 ) ,
+                               x->hydrate( BSON( "" << 5 << "" << 123 << "" << 17 ) ) );
+
+            }
+        };
+
+
+        // Dotted-field interaction with key covering.
+        class K3 {
+        public:
+            void run() {
+
+                {
+                    // Top-level "a" projection covered by key {a, x.a}.
+                    Projection m;
+                    m.init( BSON( "a" << 1 << "_id" << 0 ) );
+
+                    scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 << "x.a" << 1 ) ) );
+                    ASSERT( x );
+                }
+
+
+                {
+                    // TODO: this is temporary SERVER-2104
+                    Projection m;
+                    m.init( BSON( "x.a" << 1 << "_id" << 0 ) );
+
+                    scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 << "x.a" << 1 ) ) );
+                    ASSERT( ! x );
+                }
+
+            }
+        };
+
+
+    }
+
+    // Registers every test in this file under the "query" suite.
+    // NOTE(review): ClientCursorTest (defined above) is not registered here.
+    class All : public Suite {
+    public:
+        All() : Suite( "query" ) {
+        }
+
+        void setupTests() {
+            add< FindingStart >();
+            add< FindOne >();
+            add< FindOneRequireIndex >();
+            add< FindOneEmptyObj >();
+            add< BoundedKey >();
+            add< GetMore >();
+            add< PositiveLimit >();
+            add< ReturnOneOfManyAndTail >();
+            add< TailNotAtEnd >();
+            add< EmptyTail >();
+            add< TailableDelete >();
+            add< TailableInsertDelete >();
+            add< TailCappedOnly >();
+            add< TailableQueryOnId >();
+            add< OplogReplayMode >();
+            add< ArrayId >();
+            add< UnderscoreNs >();
+            add< EmptyFieldSpec >();
+            add< MultiNe >();
+            add< EmbeddedNe >();
+            add< EmbeddedNumericTypes >();
+            add< AutoResetIndexCache >();
+            add< UniqueIndex >();
+            add< UniqueIndexPreexistingData >();
+            add< SubobjectInArray >();
+            add< Size >();
+            add< FullArray >();
+            add< InsideArray >();
+            add< IndexInsideArrayCorrect >();
+            add< SubobjArr >();
+            add< MinMax >();
+            add< MatchCodeCodeWScope >();
+            add< MatchDBRefType >();
+            add< DirectLocking >();
+            add< FastCountIn >();
+            add< EmbeddedArray >();
+            add< DifferentNumbers >();
+            add< SymbolStringSame >();
+            add< TailableCappedRaceCondition >();
+            add< HelperTest >();
+            add< HelperByIdTest >();
+            add< FindingStartPartiallyFull >();
+            add< FindingStartStale >();
+            add< WhatsMyUri >();
+
+            add< parsedtests::basic1 >();
+
+            add< queryobjecttests::names1 >();
+
+            add< OrderingTest >();
+
+            add< proj::T1 >();
+            add< proj::K1 >();
+            add< proj::K2 >();
+            add< proj::K3 >();
+        }
+    } myall;
+
+} // namespace QueryTests
+
diff --git a/src/mongo/dbtests/queryutiltests.cpp b/src/mongo/dbtests/queryutiltests.cpp
new file mode 100644
index 00000000000..e825b4f8a9b
--- /dev/null
+++ b/src/mongo/dbtests/queryutiltests.cpp
@@ -0,0 +1,989 @@
+// queryutiltests.cpp : query utility unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/queryutil.h"
+#include "../db/querypattern.h"
+#include "../db/instance.h"
+#include "../db/pdfile.h"
+#include "dbtests.h"
+
+namespace QueryUtilTests {
+
+ namespace FieldRangeTests {
+ /**
+  * Fixture verifying the FieldRange computed for field "a" of query().
+  * Subclasses supply the query and the expected lower/upper bounds and
+  * their inclusivity; the defaults describe the full [minKey,maxKey] range.
+  */
+ class Base {
+ public:
+ virtual ~Base() {}
+ void run() {
+ const FieldRangeSet s( "ns", query(), true );
+ checkElt( lower(), s.range( "a" ).min() );
+ checkElt( upper(), s.range( "a" ).max() );
+ ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).minInclusive() );
+ ASSERT_EQUALS( upperInclusive(), s.range( "a" ).maxInclusive() );
+ }
+ protected:
+ virtual BSONObj query() = 0;
+ virtual BSONElement lower() { return minKey.firstElement(); }
+ virtual bool lowerInclusive() { return true; }
+ virtual BSONElement upper() { return maxKey.firstElement(); }
+ virtual bool upperInclusive() { return true; }
+ // Compare elements ignoring field names; on mismatch, log a diagnostic
+ // before failing the assertion.
+ static void checkElt( BSONElement expected, BSONElement actual ) {
+ if ( expected.woCompare( actual, false ) ) {
+ // endl terminates and flushes the log line so the diagnostic is
+ // actually emitted before ASSERT aborts the test.
+ log() << "expected: " << expected << ", got: " << actual << endl;
+ ASSERT( false );
+ }
+ }
+ };
+
+
+ // Base for numeric comparison tests: the unconstrained side of the range
+ // defaults to the extreme double values rather than minKey/maxKey.
+ class NumericBase : public Base {
+ public:
+ NumericBase() {
+ o = BSON( "min" << -numeric_limits<double>::max() << "max" << numeric_limits<double>::max() );
+ }
+
+ virtual BSONElement lower() { return o["min"]; }
+ virtual BSONElement upper() { return o["max"]; }
+ private:
+ BSONObj o;
+ };
+
+ // An empty query imposes no bounds on "a" (full minKey..maxKey range).
+ class Empty : public Base {
+ virtual BSONObj query() { return BSONObj(); }
+ };
+
+ // {a:1} yields the inclusive point range [1,1].
+ class Eq : public Base {
+ public:
+ Eq() : o_( BSON( "a" << 1 ) ) {}
+ virtual BSONObj query() { return o_; }
+ virtual BSONElement lower() { return o_.firstElement(); }
+ virtual BSONElement upper() { return o_.firstElement(); }
+ BSONObj o_;
+ };
+
+ // A duplicated, identical equality constraint still yields the point range [1,1].
+ class DupEq : public Eq {
+ public:
+ virtual BSONObj query() { return BSON( "a" << 1 << "b" << 2 << "a" << 1 ); }
+ };
+
+ // Single-sided comparison operators: the constrained side takes the query
+ // value, the open side keeps NumericBase's double extreme. Repeated
+ // operators on one field intersect to the tightest bound.
+ class Lt : public NumericBase {
+ public:
+ Lt() : o_( BSON( "-" << 1 ) ) {}
+ virtual BSONObj query() { return BSON( "a" << LT << 1 ); }
+ virtual BSONElement upper() { return o_.firstElement(); }
+ virtual bool upperInclusive() { return false; }
+ BSONObj o_;
+ };
+
+ class Lte : public Lt {
+ virtual BSONObj query() { return BSON( "a" << LTE << 1 ); }
+ virtual bool upperInclusive() { return true; }
+ };
+
+ class Gt : public NumericBase {
+ public:
+ Gt() : o_( BSON( "-" << 1 ) ) {}
+ virtual BSONObj query() { return BSON( "a" << GT << 1 ); }
+ virtual BSONElement lower() { return o_.firstElement(); }
+ virtual bool lowerInclusive() { return false; }
+ BSONObj o_;
+ };
+
+ class Gte : public Gt {
+ virtual BSONObj query() { return BSON( "a" << GTE << 1 ); }
+ virtual bool lowerInclusive() { return true; }
+ };
+
+ // {$lt:1,$lt:5} intersects to the tighter bound, a < 1.
+ class TwoLt : public Lt {
+ virtual BSONObj query() { return BSON( "a" << LT << 1 << LT << 5 ); }
+ };
+
+ // {$gt:0,$gt:1} intersects to the tighter bound, a > 1.
+ class TwoGt : public Gt {
+ virtual BSONObj query() { return BSON( "a" << GT << 0 << GT << 1 ); }
+ };
+
+ // {a:1, a:{$gte:1}} is consistent and still resolves to the point range [1,1].
+ class EqGte : public Eq {
+ virtual BSONObj query() { return BSON( "a" << 1 << "a" << GTE << 1 ); }
+ };
+
+ // {a:1, a:{$gte:2}} is contradictory, so no match is possible.
+ class EqGteInvalid {
+ public:
+ void run() {
+ FieldRangeSet frs( "ns", BSON( "a" << 1 << "a" << GTE << 2 ), true );
+ ASSERT( !frs.matchPossible() );
+ }
+ };
+
+ // Like Base, but inspects only the first interval of the range, since a
+ // regex constraint can produce multiple intervals. NOTE(review): run()
+ // hides the non-virtual Base::run(); that is fine here because the test
+ // framework invokes run() through the concrete test type.
+ struct RegexBase : Base {
+ void run() { //need to only look at first interval
+ FieldRangeSet s( "ns", query(), true );
+ checkElt( lower(), s.range( "a" ).intervals()[0]._lower._bound );
+ checkElt( upper(), s.range( "a" ).intervals()[0]._upper._bound );
+ ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).intervals()[0]._lower._inclusive );
+ ASSERT_EQUALS( upperInclusive(), s.range( "a" ).intervals()[0]._upper._inclusive );
+ }
+ };
+
+ // An anchored prefix regex /^abc/ yields the half-open string range
+ // ["abc", "abd").
+ class Regex : public RegexBase {
+ public:
+ Regex() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
+ virtual BSONObj query() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "^abc" );
+ return b.obj();
+ }
+ virtual BSONElement lower() { return o1_.firstElement(); }
+ virtual BSONElement upper() { return o2_.firstElement(); }
+ virtual bool upperInclusive() { return false; }
+ BSONObj o1_, o2_;
+ };
+
+ // The same prefix regex expressed with the {$regex:...} object form
+ // produces the same bounds as the native regex type above.
+ class RegexObj : public RegexBase {
+ public:
+ RegexObj() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
+ virtual BSONObj query() { return BSON("a" << BSON("$regex" << "^abc")); }
+ virtual BSONElement lower() { return o1_.firstElement(); }
+ virtual BSONElement upper() { return o2_.firstElement(); }
+ virtual bool upperInclusive() { return false; }
+ BSONObj o1_, o2_;
+ };
+
+ // A non-anchored regex cannot narrow the range: bounds fall back to the
+ // whole String type range (upper bound exclusive).
+ class UnhelpfulRegex : public RegexBase {
+ public:
+ UnhelpfulRegex() {
+ BSONObjBuilder b;
+ b.appendMinForType("lower", String);
+ b.appendMaxForType("upper", String);
+ limits = b.obj();
+ }
+
+ virtual BSONObj query() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "abc" );
+ return b.obj();
+ }
+ virtual BSONElement lower() { return limits["lower"]; }
+ virtual BSONElement upper() { return limits["upper"]; }
+ virtual bool upperInclusive() { return false; }
+ BSONObj limits;
+ };
+
+ // A $in list bounds the overall range by its smallest (-3) and largest (44)
+ // elements, regardless of the order they appear in the list.
+ class In : public Base {
+ public:
+ In() : o1_( BSON( "-" << -3 ) ), o2_( BSON( "-" << 44 ) ) {}
+ virtual BSONObj query() {
+ vector< int > vals;
+ vals.push_back( 4 );
+ vals.push_back( 8 );
+ vals.push_back( 44 );
+ vals.push_back( -1 );
+ vals.push_back( -3 );
+ vals.push_back( 0 );
+ BSONObjBuilder bb;
+ bb.append( "$in", vals );
+ BSONObjBuilder b;
+ b.append( "a", bb.done() );
+ return b.obj();
+ }
+ virtual BSONElement lower() { return o1_.firstElement(); }
+ virtual BSONElement upper() { return o2_.firstElement(); }
+ BSONObj o1_, o2_;
+ };
+
+ // equality() is true only for an exact point range with both endpoints
+ // inclusive; any strict bound disqualifies it.
+ class Equality {
+ public:
+ void run() {
+ FieldRangeSet s( "ns", BSON( "a" << 1 ), true );
+ ASSERT( s.range( "a" ).equality() );
+ FieldRangeSet s2( "ns", BSON( "a" << GTE << 1 << LTE << 1 ), true );
+ ASSERT( s2.range( "a" ).equality() );
+ FieldRangeSet s3( "ns", BSON( "a" << GT << 1 << LTE << 1 ), true );
+ ASSERT( !s3.range( "a" ).equality() );
+ FieldRangeSet s4( "ns", BSON( "a" << GTE << 1 << LT << 1 ), true );
+ ASSERT( !s4.range( "a" ).equality() );
+ FieldRangeSet s5( "ns", BSON( "a" << GTE << 1 << LTE << 1 << GT << 1 ), true );
+ ASSERT( !s5.range( "a" ).equality() );
+ FieldRangeSet s6( "ns", BSON( "a" << GTE << 1 << LTE << 1 << LT << 1 ), true );
+ ASSERT( !s6.range( "a" ).equality() );
+ }
+ };
+
+ // simplifiedQuery() should merge redundant predicates per field and fill
+ // unconstrained numeric sides with the double extremes.
+ // (Removed a leftover `cout << "simple: " ...` debug print; unit tests
+ // should not write progress noise to stdout.)
+ class SimplifiedQuery {
+ public:
+ void run() {
+ FieldRangeSet frs( "ns", BSON( "a" << GT << 1 << GT << 5 << LT << 10 << "b" << 4 << "c" << LT << 4 << LT << 6 << "d" << GTE << 0 << GT << 0 << "e" << GTE << 0 << LTE << 10 ), true );
+ BSONObj simple = frs.simplifiedQuery();
+ ASSERT( !simple.getObjectField( "a" ).woCompare( fromjson( "{$gt:5,$lt:10}" ) ) );
+ ASSERT_EQUALS( 4, simple.getIntField( "b" ) );
+ ASSERT( !simple.getObjectField( "c" ).woCompare( BSON("$gte" << -numeric_limits<double>::max() << "$lt" << 4 ) ) );
+ ASSERT( !simple.getObjectField( "d" ).woCompare( BSON("$gt" << 0 << "$lte" << numeric_limits<double>::max() ) ) );
+ ASSERT( !simple.getObjectField( "e" ).woCompare( fromjson( "{$gte:0,$lte:10}" ) ) );
+ }
+ };
+
+ // QueryPattern equality depends on which fields are constrained and the
+ // class of constraint (equality vs. bound), plus the sort's fields
+ // (ignoring direction) -- never on the specific constant values used.
+ class QueryPatternTest {
+ public:
+ void run() {
+ ASSERT( p( BSON( "a" << 1 ) ) == p( BSON( "a" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) == p( BSON( "a" << 5 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "b" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "a" << LTE << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "a" << 1 << "b" << 2 ) ) );
+ ASSERT( p( BSON( "a" << 1 << "b" << 3 ) ) != p( BSON( "a" << 1 ) ) );
+ ASSERT( p( BSON( "a" << LT << 1 ) ) == p( BSON( "a" << LTE << 5 ) ) );
+ ASSERT( p( BSON( "a" << LT << 1 << GTE << 0 ) ) == p( BSON( "a" << LTE << 5 << GTE << 0 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) < p( BSON( "a" << 1 << "b" << 1 ) ) );
+ ASSERT( !( p( BSON( "a" << 1 << "b" << 1 ) ) < p( BSON( "a" << 1 ) ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << "a" ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << -1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "c" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 << "c" << -1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << -1 << "c" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 << "c" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "b" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "b" << 1 << "c" << 1 ) ) );
+ }
+ private:
+ // Build the QueryPattern for a query/sort pair.
+ static QueryPattern p( const BSONObj &query, const BSONObj &sort = BSONObj() ) {
+ return FieldRangeSet( "", query, true ).pattern( sort );
+ }
+ };
+
+ // $where contributes no index-usable range.
+ class NoWhere {
+ public:
+ void run() {
+ ASSERT_EQUALS( 0, FieldRangeSet( "ns", BSON( "$where" << 1 ), true ).nNontrivialRanges() );
+ }
+ };
+
+ // Equality on an int compares numerically across numeric types.
+ class Numeric {
+ public:
+ void run() {
+ FieldRangeSet f( "", BSON( "a" << 1 ), true );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 2.0 ).firstElement() ) < 0 );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 0.0 ).firstElement() ) > 0 );
+ }
+ };
+
+ // $gt:4 trims the $in list's lower end: remaining range is [5,6].
+ class InLowerBound {
+ public:
+ void run() {
+ FieldRangeSet f( "", fromjson( "{a:{$gt:4,$in:[1,2,3,4,5,6]}}" ), true );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 5.0 ).firstElement(), false ) == 0 );
+ ASSERT( f.range( "a" ).max().woCompare( BSON( "a" << 6.0 ).firstElement(), false ) == 0 );
+ }
+ };
+
+ // $lt:4 trims the $in list's upper end: remaining range is [1,3].
+ class InUpperBound {
+ public:
+ void run() {
+ FieldRangeSet f( "", fromjson( "{a:{$lt:4,$in:[1,2,3,4,5,6]}}" ), true );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 1.0 ).firstElement(), false ) == 0 );
+ ASSERT( f.range( "a" ).max().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
+ }
+ };
+
+ // Unioning (|=) two disjoint ranges keeps both as separate intervals.
+ class UnionBound {
+ public:
+ void run() {
+ FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:9,$lt:12}}" ), true );
+ FieldRange ret = frs.range( "a" );
+ ret |= frs.range( "b" );
+ ASSERT_EQUALS( 2U, ret.intervals().size() );
+ }
+ };
+
+ // Intersecting (&=) two $in ranges keeps only the common values 3, 5, 9,
+ // each as an inclusive point interval.
+ class MultiBound {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", fromjson( "{a:{$in:[1,3,5,7,9]}}" ), true );
+ FieldRangeSet frs2( "", fromjson( "{a:{$in:[2,3,5,8,9]}}" ), true );
+ FieldRange fr1 = frs1.range( "a" );
+ FieldRange fr2 = frs2.range( "a" );
+ fr1 &= fr2;
+ ASSERT( fr1.min().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
+ ASSERT( fr1.max().woCompare( BSON( "a" << 9.0 ).firstElement(), false ) == 0 );
+ vector< FieldInterval > intervals = fr1.intervals();
+ vector< FieldInterval >::const_iterator j = intervals.begin();
+ double expected[] = { 3, 5, 9 };
+ for( int i = 0; i < 3; ++i, ++j ) {
+ ASSERT_EQUALS( expected[ i ], j->_lower._bound.number() );
+ ASSERT( j->_lower._inclusive );
+ ASSERT( j->_lower == j->_upper );
+ }
+ ASSERT( j == intervals.end() );
+ }
+ };
+
+ /**
+  * Fixture for range subtraction (operator-=) tests: computes
+  * range("a") - range("b") from the subclass's obj() query and verifies the
+  * resulting intervals. nums()/incs() are flattened (lower,upper) pairs per
+  * interval; len() is the expected interval count.
+  */
+ class DiffBase {
+ public:
+ virtual ~DiffBase() {}
+ void run() {
+ // NOTE(review): obj() is round-tripped through toString()/fromjson --
+ // presumably to normalize the object; confirm this is intentional.
+ FieldRangeSet frs( "", fromjson( obj().toString() ), true );
+ FieldRange ret = frs.range( "a" );
+ ret -= frs.range( "b" );
+ check( ret );
+ }
+ protected:
+ void check( const FieldRange &fr ) {
+ vector< FieldInterval > fi = fr.intervals();
+ ASSERT_EQUALS( len(), fi.size() );
+ int i = 0;
+ for( vector< FieldInterval >::const_iterator j = fi.begin(); j != fi.end(); ++j ) {
+ ASSERT_EQUALS( nums()[ i ], j->_lower._bound.numberInt() );
+ ASSERT_EQUALS( incs()[ i ], j->_lower._inclusive );
+ ++i;
+ ASSERT_EQUALS( nums()[ i ], j->_upper._bound.numberInt() );
+ ASSERT_EQUALS( incs()[ i ], j->_upper._inclusive );
+ ++i;
+ }
+ }
+ virtual unsigned len() const = 0;
+ virtual const int *nums() const = 0;
+ virtual const bool *incs() const = 0;
+ virtual BSONObj obj() const = 0;
+ };
+
+ // Expectation holder for subtraction results consisting of a single
+ // interval [low,high] with the given endpoint inclusivity.
+ class TwoRangeBase : public DiffBase {
+ public:
+ TwoRangeBase( string obj, int low, int high, bool lowI, bool highI )
+ : _obj( obj ) {
+ _n[ 0 ] = low;
+ _n[ 1 ] = high;
+ _b[ 0 ] = lowI;
+ _b[ 1 ] = highI;
+ }
+ private:
+ virtual unsigned len() const { return 1; }
+ virtual const int *nums() const { return _n; }
+ virtual const bool *incs() const { return _b; }
+ virtual BSONObj obj() const { return fromjson( _obj ); }
+ string _obj;
+ int _n[ 2 ];
+ bool _b[ 2 ];
+ };
+
+ // Subtraction cases where range(b) overlaps range(a)'s upper side (or not
+ // at all), leaving a single interval.
+ struct Diff1 : public TwoRangeBase {
+ Diff1() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:3,$lt:4}}", 1, 2, false, false ) {}
+ };
+
+ struct Diff2 : public TwoRangeBase {
+ Diff2() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:2,$lt:4}}", 1, 2, false, false ) {}
+ };
+
+ struct Diff3 : public TwoRangeBase {
+ Diff3() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gt:2,$lt:4}}", 1, 2, false, true ) {}
+ };
+
+ struct Diff4 : public TwoRangeBase {
+ Diff4() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
+ };
+
+ struct Diff5 : public TwoRangeBase {
+ Diff5() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
+ };
+
+ struct Diff6 : public TwoRangeBase {
+ Diff6() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
+ };
+
+ struct Diff7 : public TwoRangeBase {
+ Diff7() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
+ };
+
+ struct Diff8 : public TwoRangeBase {
+ Diff8() : TwoRangeBase( "{a:{$gt:1,$lt:4},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
+ };
+
+ struct Diff9 : public TwoRangeBase {
+ Diff9() : TwoRangeBase( "{a:{$gt:1,$lt:4},b:{$gt:2,$lte:4}}", 1, 2, false, true) {}
+ };
+
+ struct Diff10 : public TwoRangeBase {
+ Diff10() : TwoRangeBase( "{a:{$gte:1,$lte:4},b:{$gt:2,$lte:4}}", 1, 2, false, true) {}
+ };
+
+ // Expectation holder for subtraction results where range(b) punches a hole
+ // in the middle of range(a), leaving two intervals.
+ class SplitRangeBase : public DiffBase {
+ public:
+ SplitRangeBase( string obj, int low1, bool low1I, int high1, bool high1I, int low2, bool low2I, int high2, bool high2I )
+ : _obj( obj ) {
+ _n[ 0 ] = low1;
+ _n[ 1 ] = high1;
+ _n[ 2 ] = low2;
+ _n[ 3 ] = high2;
+ _b[ 0 ] = low1I;
+ _b[ 1 ] = high1I;
+ _b[ 2 ] = low2I;
+ _b[ 3 ] = high2I;
+ }
+ private:
+ virtual unsigned len() const { return 2; }
+ virtual const int *nums() const { return _n; }
+ virtual const bool *incs() const { return _b; }
+ virtual BSONObj obj() const { return fromjson( _obj ); }
+ string _obj;
+ int _n[ 4 ];
+ bool _b[ 4 ];
+ };
+
+ // Subtraction cases where range(b) splits range(a), overlaps its lower
+ // side, or removes one or both endpoints.
+ struct Diff11 : public SplitRangeBase {
+ Diff11() : SplitRangeBase( "{a:{$gt:1,$lte:4},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 4, true) {}
+ };
+
+ struct Diff12 : public SplitRangeBase {
+ Diff12() : SplitRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 5, false) {}
+ };
+
+ struct Diff13 : public TwoRangeBase {
+ Diff13() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff14 : public SplitRangeBase {
+ Diff14() : SplitRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lt:4}}", 1, true, 1, true, 4, true, 5, false) {}
+ };
+
+ struct Diff15 : public TwoRangeBase {
+ Diff15() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff16 : public TwoRangeBase {
+ Diff16() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff17 : public TwoRangeBase {
+ Diff17() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff18 : public TwoRangeBase {
+ Diff18() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lte:4}}", 4, 5, false, false) {}
+ };
+
+ struct Diff19 : public TwoRangeBase {
+ Diff19() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:0,$lte:1}}", 1, 5, false, true) {}
+ };
+
+ struct Diff20 : public TwoRangeBase {
+ Diff20() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lte:1}}", 1, 5, false, true) {}
+ };
+
+ struct Diff21 : public TwoRangeBase {
+ Diff21() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:0,$lt:1}}", 1, 5, true, true) {}
+ };
+
+ struct Diff22 : public TwoRangeBase {
+ Diff22() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lt:1}}", 1, 5, false, true) {}
+ };
+
+ struct Diff23 : public TwoRangeBase {
+ Diff23() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lt:0.5}}", 1, 5, false, true) {}
+ };
+
+ struct Diff24 : public TwoRangeBase {
+ Diff24() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:0}", 1, 5, false, true) {}
+ };
+
+ struct Diff25 : public TwoRangeBase {
+ Diff25() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:0}", 1, 5, true, true) {}
+ };
+
+ struct Diff26 : public TwoRangeBase {
+ Diff26() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:1}", 1, 5, false, true) {}
+ };
+
+ struct Diff27 : public TwoRangeBase {
+ Diff27() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:1}", 1, 5, false, true) {}
+ };
+
+ struct Diff28 : public SplitRangeBase {
+ Diff28() : SplitRangeBase( "{a:{$gte:1,$lte:5},b:3}", 1, true, 3, false, 3, false, 5, true) {}
+ };
+
+ struct Diff29 : public TwoRangeBase {
+ Diff29() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:5}", 1, 5, true, false) {}
+ };
+
+ struct Diff30 : public TwoRangeBase {
+ Diff30() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:5}", 1, 5, true, false) {}
+ };
+
+ struct Diff31 : public TwoRangeBase {
+ Diff31() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:6}", 1, 5, true, false) {}
+ };
+
+ struct Diff32 : public TwoRangeBase {
+ Diff32() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:6}", 1, 5, true, true) {}
+ };
+
+ // Expectation holder for subtraction results where range(b) fully covers
+ // range(a), leaving no intervals.
+ class EmptyBase : public DiffBase {
+ public:
+ EmptyBase( string obj )
+ : _obj( obj ) {}
+ private:
+ virtual unsigned len() const { return 0; }
+ virtual const int *nums() const { return 0; }
+ virtual const bool *incs() const { return 0; }
+ virtual BSONObj obj() const { return fromjson( _obj ); }
+ string _obj;
+ };
+
+ // Subtraction cases where range(b) covers range(a) entirely (EmptyBase),
+ // or leaves only an endpoint / degenerate interval behind.
+ struct Diff33 : public EmptyBase {
+ Diff33() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:6}}" ) {}
+ };
+
+ struct Diff34 : public EmptyBase {
+ Diff34() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lt:6}}" ) {}
+ };
+
+ struct Diff35 : public EmptyBase {
+ Diff35() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lt:6}}" ) {}
+ };
+
+ struct Diff36 : public EmptyBase {
+ Diff36() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lt:6}}" ) {}
+ };
+
+ struct Diff37 : public TwoRangeBase {
+ Diff37() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lt:6}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff38 : public EmptyBase {
+ Diff38() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lt:5}}" ) {}
+ };
+
+ struct Diff39 : public EmptyBase {
+ Diff39() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lte:5}}" ) {}
+ };
+
+ struct Diff40 : public EmptyBase {
+ Diff40() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:0,$lte:5}}" ) {}
+ };
+
+ struct Diff41 : public TwoRangeBase {
+ Diff41() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff42 : public EmptyBase {
+ Diff42() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lt:5}}" ) {}
+ };
+
+ struct Diff43 : public EmptyBase {
+ Diff43() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lte:5}}" ) {}
+ };
+
+ struct Diff44 : public EmptyBase {
+ Diff44() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff45 : public EmptyBase {
+ Diff45() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff46 : public TwoRangeBase {
+ Diff46() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff47 : public EmptyBase {
+ Diff47() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lte:5}}" ) {}
+ };
+
+ struct Diff48 : public TwoRangeBase {
+ Diff48() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff49 : public EmptyBase {
+ Diff49() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff50 : public TwoRangeBase {
+ Diff50() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lt:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff51 : public TwoRangeBase {
+ Diff51() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lte:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff52 : public EmptyBase {
+ Diff52() : EmptyBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff53 : public EmptyBase {
+ Diff53() : EmptyBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff54 : public SplitRangeBase {
+ Diff54() : SplitRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lt:5}}", 1, true, 1, true, 5, true, 5, true ) {}
+ };
+
+ struct Diff55 : public TwoRangeBase {
+ Diff55() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lte:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff56 : public TwoRangeBase {
+ Diff56() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff57 : public EmptyBase {
+ Diff57() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff58 : public TwoRangeBase {
+ Diff58() : TwoRangeBase( "{a:1,b:{$gt:1,$lt:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff59 : public EmptyBase {
+ Diff59() : EmptyBase( "{a:1,b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff60 : public EmptyBase {
+ Diff60() : EmptyBase( "{a:2,b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff61 : public EmptyBase {
+ Diff61() : EmptyBase( "{a:5,b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff62 : public TwoRangeBase {
+ Diff62() : TwoRangeBase( "{a:5,b:{$gt:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff63 : public EmptyBase {
+ Diff63() : EmptyBase( "{a:5,b:5}" ) {}
+ };
+
+ struct Diff64 : public TwoRangeBase {
+ Diff64() : TwoRangeBase( "{a:{$gte:1,$lte:2},b:{$gt:0,$lte:1}}", 1, 2, false, true ) {}
+ };
+
+ // Subtract a union of several ranges from a single range.
+ class DiffMulti1 : public DiffBase {
+ public:
+ void run() {
+ FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:0,$lt:2},c:3,d:{$gt:4,$lt:5},e:{$gt:7,$lt:10}}" ), true );
+ FieldRange ret = frs.range( "a" );
+ FieldRange other = frs.range( "b" );
+ other |= frs.range( "c" );
+ other |= frs.range( "d" );
+ other |= frs.range( "e" );
+ ret -= other;
+ check( ret );
+ }
+ protected:
+ virtual unsigned len() const { return 3; }
+ virtual const int *nums() const { static int n[] = { 2, 3, 3, 4, 5, 7 }; return n; }
+ virtual const bool *incs() const { static bool b[] = { true, false, false, true, true, true }; return b; }
+ virtual BSONObj obj() const { return BSONObj(); }
+ };
+
+ // Subtract a single range from a union of several ranges.
+ class DiffMulti2 : public DiffBase {
+ public:
+ void run() {
+ FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:0,$lt:2},c:3,d:{$gt:4,$lt:5},e:{$gt:7,$lt:10}}" ), true );
+ FieldRange mask = frs.range( "a" );
+ FieldRange ret = frs.range( "b" );
+ ret |= frs.range( "c" );
+ ret |= frs.range( "d" );
+ ret |= frs.range( "e" );
+ ret -= mask;
+ check( ret );
+ }
+ protected:
+ virtual unsigned len() const { return 2; }
+ virtual const int *nums() const { static int n[] = { 0, 1, 9, 10 }; return n; }
+ virtual const bool *incs() const { static bool b[] = { false, true, true, false }; return b; }
+ virtual BSONObj obj() const { return BSONObj(); }
+ };
+
+ } // namespace FieldRangeTests
+
+ namespace FieldRangeSetTests {
+
+ // Intersecting two FieldRangeSets merges constraints field by field.
+ class Intersect {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", fromjson( "{b:{$in:[5,6]},c:7,d:{$in:[8,9]}}" ), true );
+ FieldRangeSet frs2( "", fromjson( "{a:1,b:5,c:{$in:[7,8]},d:{$in:[8,9]},e:10}" ), true );
+ frs1 &= frs2;
+ ASSERT_EQUALS( fromjson( "{a:1,b:5,c:7,d:{$gte:8,$lte:9},e:10}" ), frs1.simplifiedQuery( BSONObj() ) );
+ }
+ };
+
+ // Intersection rules for multikey (singleKey == false) range sets: only
+ // trivial or fully contained ranges may tighten an existing range.
+ class MultiKeyIntersect {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", BSONObj(), false );
+ FieldRangeSet frs2( "", BSON( "a" << GT << 4 ), false );
+ FieldRangeSet frs3( "", BSON( "a" << LT << 6 ), false );
+ // An intersection with a trivial range is allowed.
+ frs1 &= frs2;
+ ASSERT_EQUALS( frs2.simplifiedQuery( BSONObj() ), frs1.simplifiedQuery( BSONObj() ) );
+ // An intersection with a nontrivial range is not allowed, as it might prevent a valid
+ // multikey match.
+ frs1 &= frs3;
+ ASSERT_EQUALS( frs2.simplifiedQuery( BSONObj() ), frs1.simplifiedQuery( BSONObj() ) );
+ // Now intersect with a fully contained range.
+ FieldRangeSet frs4( "", BSON( "a" << GT << 6 ), false );
+ frs1 &= frs4;
+ ASSERT_EQUALS( frs4.simplifiedQuery( BSONObj() ), frs1.simplifiedQuery( BSONObj() ) );
+ }
+ };
+
+ // Subtraction behaves the same for multikey range sets as for single key.
+ class MultiKeyDiff {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", BSON( "a" << GT << 4 ), false );
+ FieldRangeSet frs2( "", BSON( "a" << GT << 6 ), false );
+ // Range subtraction is no different for multikey ranges.
+ frs1 -= frs2;
+ ASSERT_EQUALS( BSON( "a" << GT << 4 << LTE << 6 ), frs1.simplifiedQuery( BSONObj() ) );
+ }
+ };
+
+ // matchPossible(): contradictory bounds rule out a match only for a
+ // single key set.
+ class MatchPossible {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", BSON( "a" << GT << 4 ), true );
+ ASSERT( frs1.matchPossible() );
+ // Conflicting constraints invalid for a single key set.
+ FieldRangeSet frs2( "", BSON( "a" << GT << 4 << LT << 2 ), true );
+ ASSERT( !frs2.matchPossible() );
+ // Conflicting constraints do not make a match impossible for a multikey
+ // set: different array elements may satisfy each constraint.
+ FieldRangeSet frs3( "", BSON( "a" << GT << 4 << LT << 2 ), false );
+ ASSERT( frs3.matchPossible() );
+ }
+ };
+
+ // matchPossibleForIndex(): single-key-invalid constraints only rule out
+ // indexes that actually cover the conflicting field.
+ class MatchPossibleForIndex {
+ public:
+ void run() {
+ // Conflicting constraints remain possible for a multikey set.
+ FieldRangeSet frs1( "", BSON( "a" << GT << 4 << LT << 2 ), false );
+ ASSERT( frs1.matchPossibleForIndex( BSON( "a" << 1 ) ) );
+ // Conflicting constraints for a single key set (third ctor arg is true).
+ FieldRangeSet frs2( "", BSON( "a" << GT << 4 << LT << 2 ), true );
+ ASSERT( !frs2.matchPossibleForIndex( BSON( "a" << 1 ) ) );
+ // If the index doesn't include the key, it is not single key invalid.
+ ASSERT( frs2.matchPossibleForIndex( BSON( "b" << 1 ) ) );
+ // If the key pattern is not a real btree index ($natural or empty),
+ // the set is not single key invalid.
+ ASSERT( frs2.matchPossibleForIndex( BSON( "$natural" << 1 ) ) );
+ ASSERT( frs2.matchPossibleForIndex( BSONObj() ) );
+ }
+ };
+
+ } // namespace FieldRangeSetTests
+
+ namespace FieldRangeSetPairTests {
+
+ // noNontrivialRanges() is true only for an empty query; any constraint --
+ // even an unsatisfiable one -- counts as nontrivial.
+ class NoNontrivialRanges {
+ public:
+ void run() {
+ FieldRangeSetPair frsp1( "", BSONObj() );
+ ASSERT( frsp1.noNontrivialRanges() );
+ FieldRangeSetPair frsp2( "", BSON( "a" << 1 ) );
+ ASSERT( !frsp2.noNontrivialRanges() );
+ FieldRangeSetPair frsp3( "", BSON( "a" << GT << 1 ) );
+ ASSERT( !frsp3.noNontrivialRanges() );
+ // A single key invalid constraint is still nontrivial.
+ FieldRangeSetPair frsp4( "", BSON( "a" << GT << 1 << LT << 0 ) );
+ ASSERT( !frsp4.noNontrivialRanges() );
+ // Still nontrivial if multikey invalid.
+ frsp4 -= frsp4.frsForIndex( 0, -1 );
+ ASSERT( !frsp4.noNontrivialRanges() );
+ }
+ };
+
+ // Pair-level matchPossible(): only a multikey-invalid state rules out a
+ // match for the pair; single-key-invalid alone does not.
+ class MatchPossible {
+ public:
+ void run() {
+ // Match possible for simple query.
+ FieldRangeSetPair frsp1( "", BSON( "a" << 1 ) );
+ ASSERT( frsp1.matchPossible() );
+ // Match possible for single key invalid query.
+ FieldRangeSetPair frsp2( "", BSON( "a" << GT << 1 << LT << 0 ) );
+ ASSERT( frsp2.matchPossible() );
+ // Match not possible for multi key invalid query.
+ frsp1 -= frsp1.frsForIndex( 0, - 1 );
+ ASSERT( !frsp1.matchPossible() );
+ }
+ };
+
+ /**
+  * Fixture providing a scratch namespace with helpers for creating indexes
+  * and resolving their index numbers. The namespace is created in the ctor
+  * and dropped in the dtor (RAII), under the dblock held for the fixture's
+  * lifetime.
+  */
+ class IndexBase {
+ public:
+ IndexBase() : _ctx( ns() ) , indexNum_( 0 ) {
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ }
+ ~IndexBase() {
+ if ( !nsd() )
+ return;
+ string s( ns() );
+ dropNS( s );
+ }
+ protected:
+ static const char *ns() { return "unittests.FieldRangeSetPairTests"; }
+ static NamespaceDetails *nsd() { return nsdetails( ns() ); }
+ // Create an index on `key` (named with a running counter) and return its
+ // IndexDetails by scanning the namespace's index list.
+ IndexDetails *index( const BSONObj &key ) {
+ stringstream ss;
+ ss << indexNum_++;
+ string name = ss.str();
+ client_.resetIndexCache();
+ client_.ensureIndex( ns(), key, false, name.c_str() );
+ NamespaceDetails *d = nsd();
+ // NOTE(review): matches by key pattern, not index name -- the name
+ // comparison was left commented out by the author.
+ for( int i = 0; i < d->nIndexes; ++i ) {
+ if ( d->idx(i).keyPattern() == key /*indexName() == name*/ || ( d->idx(i).isIdIndex() && IndexDetails::isIdIndexPattern( key ) ) )
+ return &d->idx(i);
+ }
+ // Unreachable when ensureIndex succeeded; the return satisfies the compiler.
+ assert( false );
+ return 0;
+ }
+ int indexno( const BSONObj &key ) {
+ return nsd()->idxNo( *index(key) );
+ }
+ static DBDirectClient client_;
+ private:
+ dblock lk_;
+ Client::Context _ctx;
+ int indexNum_;
+ };
+ DBDirectClient IndexBase::client_;
+
+ // With real indexes: an array value makes index "a" multikey, so the
+ // conflicting constraint still permits a match there, but not via the
+ // single key index "b".
+ class MatchPossibleForIndex : public IndexBase {
+ public:
+ void run() {
+ int a = indexno( BSON( "a" << 1 ) );
+ int b = indexno( BSON( "b" << 1 ) );
+ IndexBase::client_.insert( ns(), BSON( "a" << BSON_ARRAY( 1 << 2 ) << "b" << 1 ) );
+ // Valid ranges match possible for both indexes.
+ FieldRangeSetPair frsp1( ns(), BSON( "a" << GT << 1 << LT << 4 << "b" << GT << 1 << LT << 4 ) );
+ ASSERT( frsp1.matchPossibleForIndex( nsd(), a, BSON( "a" << 1 ) ) );
+ ASSERT( frsp1.matchPossibleForIndex( nsd(), b, BSON( "b" << 1 ) ) );
+ // Single key invalid range means match impossible for single key index.
+ FieldRangeSetPair frsp2( ns(), BSON( "a" << GT << 4 << LT << 1 << "b" << GT << 4 << LT << 1 ) );
+ ASSERT( frsp2.matchPossibleForIndex( nsd(), a, BSON( "a" << 1 ) ) );
+ ASSERT( !frsp2.matchPossibleForIndex( nsd(), b, BSON( "b" << 1 ) ) );
+ }
+ };
+
+ } // namespace FieldRangeSetPairTests
+
+ // Suite registration: the static `myall` instance registers every test
+ // case above under the suite name "queryutil".
+ class All : public Suite {
+ public:
+ All() : Suite( "queryutil" ) {}
+
+ void setupTests() {
+ add< FieldRangeTests::Empty >();
+ add< FieldRangeTests::Eq >();
+ add< FieldRangeTests::DupEq >();
+ add< FieldRangeTests::Lt >();
+ add< FieldRangeTests::Lte >();
+ add< FieldRangeTests::Gt >();
+ add< FieldRangeTests::Gte >();
+ add< FieldRangeTests::TwoLt >();
+ add< FieldRangeTests::TwoGt >();
+ add< FieldRangeTests::EqGte >();
+ add< FieldRangeTests::EqGteInvalid >();
+ add< FieldRangeTests::Regex >();
+ add< FieldRangeTests::RegexObj >();
+ add< FieldRangeTests::UnhelpfulRegex >();
+ add< FieldRangeTests::In >();
+ add< FieldRangeTests::Equality >();
+ add< FieldRangeTests::SimplifiedQuery >();
+ add< FieldRangeTests::QueryPatternTest >();
+ add< FieldRangeTests::NoWhere >();
+ add< FieldRangeTests::Numeric >();
+ add< FieldRangeTests::InLowerBound >();
+ add< FieldRangeTests::InUpperBound >();
+ add< FieldRangeTests::UnionBound >();
+ add< FieldRangeTests::MultiBound >();
+ add< FieldRangeTests::Diff1 >();
+ add< FieldRangeTests::Diff2 >();
+ add< FieldRangeTests::Diff3 >();
+ add< FieldRangeTests::Diff4 >();
+ add< FieldRangeTests::Diff5 >();
+ add< FieldRangeTests::Diff6 >();
+ add< FieldRangeTests::Diff7 >();
+ add< FieldRangeTests::Diff8 >();
+ add< FieldRangeTests::Diff9 >();
+ add< FieldRangeTests::Diff10 >();
+ add< FieldRangeTests::Diff11 >();
+ add< FieldRangeTests::Diff12 >();
+ add< FieldRangeTests::Diff13 >();
+ add< FieldRangeTests::Diff14 >();
+ add< FieldRangeTests::Diff15 >();
+ add< FieldRangeTests::Diff16 >();
+ add< FieldRangeTests::Diff17 >();
+ add< FieldRangeTests::Diff18 >();
+ add< FieldRangeTests::Diff19 >();
+ add< FieldRangeTests::Diff20 >();
+ add< FieldRangeTests::Diff21 >();
+ add< FieldRangeTests::Diff22 >();
+ add< FieldRangeTests::Diff23 >();
+ add< FieldRangeTests::Diff24 >();
+ add< FieldRangeTests::Diff25 >();
+ add< FieldRangeTests::Diff26 >();
+ add< FieldRangeTests::Diff27 >();
+ add< FieldRangeTests::Diff28 >();
+ add< FieldRangeTests::Diff29 >();
+ add< FieldRangeTests::Diff30 >();
+ add< FieldRangeTests::Diff31 >();
+ add< FieldRangeTests::Diff32 >();
+ add< FieldRangeTests::Diff33 >();
+ add< FieldRangeTests::Diff34 >();
+ add< FieldRangeTests::Diff35 >();
+ add< FieldRangeTests::Diff36 >();
+ add< FieldRangeTests::Diff37 >();
+ add< FieldRangeTests::Diff38 >();
+ add< FieldRangeTests::Diff39 >();
+ add< FieldRangeTests::Diff40 >();
+ add< FieldRangeTests::Diff41 >();
+ add< FieldRangeTests::Diff42 >();
+ add< FieldRangeTests::Diff43 >();
+ add< FieldRangeTests::Diff44 >();
+ add< FieldRangeTests::Diff45 >();
+ add< FieldRangeTests::Diff46 >();
+ add< FieldRangeTests::Diff47 >();
+ add< FieldRangeTests::Diff48 >();
+ add< FieldRangeTests::Diff49 >();
+ add< FieldRangeTests::Diff50 >();
+ add< FieldRangeTests::Diff51 >();
+ add< FieldRangeTests::Diff52 >();
+ add< FieldRangeTests::Diff53 >();
+ add< FieldRangeTests::Diff54 >();
+ add< FieldRangeTests::Diff55 >();
+ add< FieldRangeTests::Diff56 >();
+ add< FieldRangeTests::Diff57 >();
+ add< FieldRangeTests::Diff58 >();
+ add< FieldRangeTests::Diff59 >();
+ add< FieldRangeTests::Diff60 >();
+ add< FieldRangeTests::Diff61 >();
+ add< FieldRangeTests::Diff62 >();
+ add< FieldRangeTests::Diff63 >();
+ add< FieldRangeTests::Diff64 >();
+ add< FieldRangeTests::DiffMulti1 >();
+ add< FieldRangeTests::DiffMulti2 >();
+ add< FieldRangeSetTests::Intersect >();
+ add< FieldRangeSetTests::MultiKeyIntersect >();
+ add< FieldRangeSetTests::MultiKeyDiff >();
+ add< FieldRangeSetTests::MatchPossible >();
+ add< FieldRangeSetTests::MatchPossibleForIndex >();
+ add< FieldRangeSetPairTests::NoNontrivialRanges >();
+ add< FieldRangeSetPairTests::MatchPossible >();
+ add< FieldRangeSetPairTests::MatchPossibleForIndex >();
+ }
+ } myall;
+
+} // namespace QueryUtilTests
+
diff --git a/src/mongo/dbtests/replsettests.cpp b/src/mongo/dbtests/replsettests.cpp
new file mode 100644
index 00000000000..c1fca3b1ad6
--- /dev/null
+++ b/src/mongo/dbtests/replsettests.cpp
@@ -0,0 +1,227 @@
+// replsettests.cpp : Unit tests for replica sets
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/repl.h"
+
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+#include "../db/oplog.h"
+#include "../db/queryoptimizer.h"
+
+#include "../db/repl/rs.h"
+
+namespace mongo {
+ void createOplog();
+}
+
+namespace ReplSetTests {
+
+ // Fixture for replica-set tests: names the replica set "foo", sets a small
+ // oplog size, and creates the oplog before each test runs.
+ class Base {
+ static DBDirectClient client_; // in-process client shared by all tests
+ public:
+ Base() {
+ cmdLine._replSet = "foo";
+ cmdLine.oplogSize = 5;
+ createOplog();
+ }
+
+ // Namespace every test in this suite operates on.
+ static const char *ns() {
+ return "unittests.repltests";
+ }
+
+ DBDirectClient *client() const { return &client_; }
+
+ // Inserts directly through theDataFileMgr (bypasses the client layer);
+ // 'god' is forwarded to the data file manager's insert.
+ static void insert( const BSONObj &o, bool god = false ) {
+ dblock lk;
+ Client::Context ctx( ns() );
+ theDataFileMgr.insert( ns(), o.objdata(), o.objsize(), god );
+ }
+ BSONObj findOne( const BSONObj &query = BSONObj() ) const {
+ return client()->findOne( ns(), query );
+ }
+ };
+ DBDirectClient Base::client_;
+
+
+ // InitialSync stand-in: counts syncApply() calls in 'step' and returns a
+ // canned success/failure result per 'failOnStep' instead of applying ops;
+ // shouldRetry() simply returns the configurable 'retry' flag.
+ class MockInitialSync : public replset::InitialSync {
+ int step; // number of syncApply() calls so far
+ public:
+ MockInitialSync() : replset::InitialSync(""), step(0), failOnStep(SUCCEED), retry(true) {}
+
+ enum FailOn {SUCCEED, FAIL_FIRST_APPLY, FAIL_BOTH_APPLY};
+
+ FailOn failOnStep; // which apply attempt(s) should report failure
+ bool retry; // value returned by shouldRetry()
+
+ // instead of actually applying operations, we return success or failure
+ virtual bool syncApply(const BSONObj& o) {
+ step++;
+
+ if ((failOnStep == FAIL_FIRST_APPLY && step == 1) ||
+ (failOnStep == FAIL_BOTH_APPLY)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ virtual bool shouldRetry(const BSONObj& o) {
+ return retry;
+ }
+ };
+
+ // Exercises InitialSync::applyOp: plain success, recovery via retry after a
+ // first failed apply, skipping retry when shouldRetry() is false, and a
+ // UserException when both apply attempts fail.
+ class TestInitApplyOp : public Base {
+ public:
+ void run() {
+ writelock lk("");
+
+ OpTime o1 = OpTime::now();
+ OpTime o2 = OpTime::now();
+
+ BSONObjBuilder b;
+ b.appendTimestamp("ts", o2.asLL());
+ BSONObj obj = b.obj();
+
+ MockInitialSync mock;
+
+ // all three should succeed
+ mock.applyOp(obj, o1);
+
+ mock.failOnStep = MockInitialSync::FAIL_FIRST_APPLY;
+ mock.applyOp(obj, o1);
+
+ mock.retry = false;
+ mock.applyOp(obj, o1);
+
+ // force failure
+ MockInitialSync mock2;
+ mock2.failOnStep = MockInitialSync::FAIL_BOTH_APPLY;
+
+ ASSERT_THROWS(mock2.applyOp(obj, o2), UserException);
+ }
+ };
+
+ // InitialSync whose shouldRetry() can first insert the missing target doc
+ // {_id:123} (when insertOnRetry is set) so that the retried apply succeeds.
+ class SyncTest2 : public replset::InitialSync {
+ public:
+ bool insertOnRetry; // when true, insert {_id:123} before signalling retry
+ SyncTest2() : replset::InitialSync(""), insertOnRetry(false) {}
+ virtual ~SyncTest2() {}
+ virtual bool shouldRetry(const BSONObj& o) {
+ if (!insertOnRetry) {
+ return true;
+ }
+
+ Base::insert(BSON("_id" << 123));
+ return true;
+ }
+ };
+
+ // applyOp of an update targeting a missing doc throws until shouldRetry()
+ // inserts the target; afterwards the $set applies and x becomes 456.
+ class TestInitApplyOp2 : public Base {
+ public:
+ void run() {
+ writelock lk("");
+
+ OpTime o1 = OpTime::now();
+ OpTime o2 = OpTime::now();
+
+ // build an oplog-style update ("op":"u") against {_id:123}
+ BSONObjBuilder b;
+ b.appendTimestamp("ts", o2.asLL());
+ b.append("op", "u");
+ b.append("o", BSON("$set" << BSON("x" << 456)));
+ b.append("o2", BSON("_id" << 123));
+ b.append("ns", ns());
+ BSONObj obj = b.obj();
+
+ SyncTest2 sync;
+ ASSERT_THROWS(sync.applyOp(obj, o1), UserException);
+
+ sync.insertOnRetry = true;
+ // succeeds
+ sync.applyOp(obj, o1);
+
+ BSONObj fin = findOne();
+ assert(fin["x"].Number() == 456);
+ }
+ };
+
+ // Applies an oplog-style update against a capped collection that contains
+ // no matching doc; applyOperation_inlock reports failure (which, per the
+ // API quirk noted below, means returning true).
+ class CappedInitialSync : public Base {
+ string _ns;
+ dblock lk;
+ Client::Context _context;
+
+ // capped collection spec used by create()
+ string spec() const {
+ return "{\"capped\":true,\"size\":512}";
+ }
+
+ void create() {
+ dblock lk;
+ string err;
+ ASSERT(userCreateNS( _ns.c_str(), fromjson( spec() ), err, false ));
+ }
+
+ void drop() {
+ string s( _ns );
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( s, errmsg, result );
+ }
+ public:
+ // Drop any leftover collection so each run starts clean.
+ CappedInitialSync() : _ns("unittests.foo.bar"), _context(_ns) {
+ if (nsdetails(_ns.c_str()) != NULL) {
+ drop();
+ }
+ }
+ ~CappedInitialSync() {
+ if ( nsdetails(_ns.c_str()) == NULL )
+ return;
+ drop();
+ }
+
+ void run() {
+ create();
+
+ BSONObjBuilder b;
+ b.appendTimestamp("ts", OpTime::now().asLL());
+ b.append("op", "u");
+ b.append("o", BSON("$set" << BSON("x" << 456)));
+ b.append("o2", BSON("_id" << 123 << "x" << 123));
+ b.append("ns", _ns);
+
+ // in an annoying twist of api, returns true on failure
+ assert(applyOperation_inlock(b.obj(), true));
+ }
+ };
+
+
+ // Registers the "replset" suite's test cases with the framework.
+ class All : public Suite {
+ public:
+ All() : Suite( "replset" ) {
+ }
+
+ void setupTests() {
+ add< TestInitApplyOp >();
+ add< TestInitApplyOp2 >();
+ add< CappedInitialSync >();
+ }
+ } myall;
+}
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
new file mode 100644
index 00000000000..86288ad9426
--- /dev/null
+++ b/src/mongo/dbtests/repltests.cpp
@@ -0,0 +1,1228 @@
+// repltests.cpp : Unit tests for replication
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/repl.h"
+
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+#include "../db/oplog.h"
+#include "../db/queryoptimizer.h"
+
+#include "../db/repl/rs.h"
+
+namespace mongo {
+ void createOplog();
+}
+
+namespace ReplTests {
+
+ // Shorthand for fromjson().
+ BSONObj f( const char *s ) {
+ return fromjson( s );
+ }
+
+ // Fixture for replication tests: runs as master with an oplog and an _id
+ // index on ns(); the destructor clears both the test collection and the
+ // oplog. Provides helpers to inspect, replay, and reset oplog state.
+ class Base {
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base() : _context( ns() ) {
+ replSettings.master = true;
+ createOplog();
+ ensureHaveIdIndex( ns() );
+ }
+ ~Base() {
+ try {
+ replSettings.master = false;
+ deleteAll( ns() );
+ deleteAll( cllNS() );
+ }
+ catch ( ... ) {
+ FAIL( "Exception while cleaning up test" );
+ }
+ }
+ protected:
+ static const char *ns() {
+ return "unittests.repltests";
+ }
+ // Master oplog namespace.
+ static const char *cllNS() {
+ return "local.oplog.$main";
+ }
+ DBDirectClient *client() const { return &client_; }
+ BSONObj one( const BSONObj &query = BSONObj() ) const {
+ return client()->findOne( ns(), query );
+ }
+ void checkOne( const BSONObj &o ) const {
+ check( o, one( o ) );
+ }
+ // Every doc matching o must equal o exactly (and at least one must match).
+ void checkAll( const BSONObj &o ) const {
+ auto_ptr< DBClientCursor > c = client()->query( ns(), o );
+ assert( c->more() );
+ while( c->more() ) {
+ check( o, c->next() );
+ }
+ }
+ // Asserts exact equality, printing both sides on mismatch for debugging.
+ void check( const BSONObj &expected, const BSONObj &got ) const {
+ if ( expected.woCompare( got ) ) {
+ out() << "expected: " << expected.toString()
+ << ", got: " << got.toString() << endl;
+ }
+ ASSERT_EQUALS( expected , got );
+ }
+ BSONObj oneOp() const {
+ return client()->findOne( cllNS(), BSONObj() );
+ }
+ // Number of docs in ns(), counted with a raw table scan.
+ int count() const {
+ int count = 0;
+ dblock lk;
+ Client::Context ctx( ns() );
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() );
+ for(; c->ok(); c->advance(), ++count ) {
+// cout << "obj: " << c->current().toString() << endl;
+ }
+ return count;
+ }
+ // Number of entries currently in the oplog.
+ static int opCount() {
+ dblock lk;
+ Client::Context ctx( cllNS() );
+ int count = 0;
+ for( boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( cllNS() ); c->ok(); c->advance() )
+ ++count;
+ return count;
+ }
+ // Snapshots every oplog entry, then replays each through
+ // ReplSource::applyOperation as a slave would.
+ static void applyAllOperations() {
+ dblock lk;
+ vector< BSONObj > ops;
+ {
+ Client::Context ctx( cllNS() );
+ for( boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( cllNS() ); c->ok(); c->advance() )
+ ops.push_back( c->current() );
+ }
+ {
+ Client::Context ctx( ns() );
+ BSONObjBuilder b;
+ b.append("host", "localhost");
+ b.appendTimestamp("syncedTo", 0);
+ ReplSource a(b.obj());
+ for( vector< BSONObj >::iterator i = ops.begin(); i != ops.end(); ++i ) {
+ a.applyOperation( *i );
+ }
+ }
+ }
+ // Debug helper: dump every doc in ns to the log.
+ static void printAll( const char *ns ) {
+ dblock lk;
+ Client::Context ctx( ns );
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns );
+ vector< DiskLoc > toDelete;
+ out() << "all for " << ns << endl;
+ for(; c->ok(); c->advance() ) {
+ out() << c->current().toString() << endl;
+ }
+ }
+ // These deletes don't get logged.
+ static void deleteAll( const char *ns ) {
+ dblock lk;
+ Client::Context ctx( ns );
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns );
+ vector< DiskLoc > toDelete;
+ for(; c->ok(); c->advance() ) {
+ toDelete.push_back( c->currLoc() );
+ }
+ for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
+ theDataFileMgr.deleteRecord( ns, i->rec(), *i, true );
+ }
+ }
+ // Direct insert via theDataFileMgr, bypassing the client layer.
+ static void insert( const BSONObj &o, bool god = false ) {
+ dblock lk;
+ Client::Context ctx( ns() );
+ theDataFileMgr.insert( ns(), o.objdata(), o.objsize(), god );
+ }
+ // Parses json and prepends a freshly generated _id ("with id").
+ static BSONObj wid( const char *json ) {
+ class BSONObjBuilder b;
+ OID id;
+ id.init();
+ b.appendOID( "_id", &id );
+ b.appendElements( fromjson( json ) );
+ return b.obj();
+ }
+ private:
+ static DBDirectClient client_;
+ };
+ DBDirectClient Base::client_;
+
+ // The oplog holds one entry after fixture setup; a client insert logs
+ // exactly one more.
+ class LogBasic : public Base {
+ public:
+ void run() {
+ ASSERT_EQUALS( 1, opCount() );
+ client()->insert( ns(), fromjson( "{\"a\":\"b\"}" ) );
+ ASSERT_EQUALS( 2, opCount() );
+ }
+ };
+
+ namespace Idempotence {
+
+ // Template-method base for idempotence tests: perform an operation once
+ // (doIt), verify state, then replay the whole oplog (possibly repeatedly,
+ // from scratch and on top of existing state) and verify that the state and
+ // the oplog entry count are unchanged by replaying.
+ class Base : public ReplTests::Base {
+ public:
+ virtual ~Base() {}
+ void run() {
+ reset();
+ doIt();
+ int nOps = opCount();
+ check();
+ applyAllOperations();
+ check();
+ ASSERT_EQUALS( nOps, opCount() );
+
+ reset();
+ applyAllOperations();
+ check();
+ ASSERT_EQUALS( nOps, opCount() );
+ applyAllOperations();
+ check();
+ ASSERT_EQUALS( nOps, opCount() );
+ }
+ protected:
+ virtual void doIt() const = 0; // the logged operation under test
+ virtual void check() const = 0; // verify expected collection state
+ virtual void reset() const = 0; // restore pre-doIt state (unlogged)
+ };
+
+ // Insert with a server-filled Timestamp field; replay must reproduce the
+ // same timestamp value, not generate a new one.
+ class InsertTimestamp : public Base {
+ public:
+ void doIt() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ b.appendTimestamp( "t" );
+ client()->insert( ns(), b.done() );
+ date_ = client()->findOne( ns(), QUERY( "a" << 1 ) ).getField( "t" ).date();
+ }
+ void check() const {
+ BSONObj o = client()->findOne( ns(), QUERY( "a" << 1 ) );
+ ASSERT( 0 != o.getField( "t" ).date() );
+ ASSERT_EQUALS( date_, o.getField( "t" ).date() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ private:
+ mutable Date_t date_; // timestamp captured after the original insert
+ };
+
+ // Insert of a doc with no _id (one is auto-generated).
+ class InsertAutoId : public Base {
+ public:
+ InsertAutoId() : o_( fromjson( "{\"a\":\"b\"}" ) ) {}
+ void doIt() const {
+ client()->insert( ns(), o_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ protected:
+ BSONObj o_;
+ };
+
+ // Insert with an explicit _id; replay must reproduce the doc exactly.
+ class InsertWithId : public InsertAutoId {
+ public:
+ InsertWithId() {
+ o_ = fromjson( "{\"_id\":ObjectId(\"0f0f0f0f0f0f0f0f0f0f0f0f\"),\"a\":\"b\"}" );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( o_ );
+ }
+ };
+
+ // Batch insert of two docs with explicit _ids.
+ class InsertTwo : public Base {
+ public:
+ InsertTwo() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ t_( fromjson( "{'_id':2,c:'d'}" ) ) {}
+ void doIt() const {
+ vector< BSONObj > v;
+ v.push_back( o_ );
+ v.push_back( t_ );
+ client()->insert( ns(), v );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ checkOne( o_ );
+ checkOne( t_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ private:
+ BSONObj o_;
+ BSONObj t_;
+ };
+
+ // Two inserts of the same id-less doc yield two distinct docs after replay.
+ class InsertTwoIdentical : public Base {
+ public:
+ InsertTwoIdentical() : o_( fromjson( "{\"a\":\"b\"}" ) ) {}
+ void doIt() const {
+ client()->insert( ns(), o_ );
+ client()->insert( ns(), o_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ private:
+ BSONObj o_;
+ };
+
+ // Update that sets a server-filled Timestamp field; replay must preserve
+ // the original value.
+ class UpdateTimestamp : public Base {
+ public:
+ void doIt() const {
+ BSONObjBuilder b;
+ b.append( "_id", 1 );
+ b.appendTimestamp( "t" );
+ client()->update( ns(), BSON( "_id" << 1 ), b.done() );
+ date_ = client()->findOne( ns(), QUERY( "_id" << 1 ) ).getField( "t" ).date();
+ }
+ void check() const {
+ BSONObj o = client()->findOne( ns(), QUERY( "_id" << 1 ) );
+ ASSERT( 0 != o.getField( "t" ).date() );
+ ASSERT_EQUALS( date_, o.getField( "t" ).date() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( BSON( "_id" << 1 ) );
+ }
+ private:
+ mutable Date_t date_; // timestamp captured after the original update
+ };
+
+ // Non-multi update where two docs match the query: exactly one is changed,
+ // so one doc still matches the query and one matches the replacement.
+ class UpdateSameField : public Base {
+ public:
+ UpdateSameField() :
+ q_( fromjson( "{a:'b'}" ) ),
+ o1_( wid( "{a:'b'}" ) ),
+ o2_( wid( "{a:'b'}" ) ),
+ u_( fromjson( "{a:'c'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ ASSERT( !client()->findOne( ns(), q_ ).isEmpty() );
+ ASSERT( !client()->findOne( ns(), u_ ).isEmpty() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o1_ );
+ insert( o2_ );
+ }
+ private:
+ BSONObj q_, o1_, o2_, u_;
+ };
+
+ // As UpdateSameField, but the replacement carries an explicit _id so the
+ // replayed update targets the same doc.
+ class UpdateSameFieldWithId : public Base {
+ public:
+ UpdateSameFieldWithId() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ q_( fromjson( "{a:'b'}" ) ),
+ u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ ASSERT( !client()->findOne( ns(), q_ ).isEmpty() );
+ ASSERT( !client()->findOne( ns(), u_ ).isEmpty() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ insert( fromjson( "{'_id':2,a:'b'}" ) );
+ }
+ private:
+ BSONObj o_, q_, u_;
+ };
+
+ // Whole-doc replacement where the query itself is the original doc.
+ class UpdateSameFieldExplicitId : public Base {
+ public:
+ UpdateSameFieldExplicitId() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), o_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( u_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, u_;
+ };
+
+ // Whole-doc replacement queried by _id only.
+ class UpdateDifferentFieldExplicitId : public Base {
+ public:
+ UpdateDifferentFieldExplicitId() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( u_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_;
+ };
+
+ // Same as UpdateDifferentFieldExplicitId but with upsert:true (the doc
+ // already exists, so the upsert acts as a plain update).
+ class UpsertUpdateNoMods : public UpdateDifferentFieldExplicitId {
+ void doIt() const {
+ client()->update( ns(), q_, u_, true );
+ }
+ };
+
+ // Upsert that matches nothing inserts the replacement doc.
+ class UpsertInsertNoMods : public InsertAutoId {
+ void doIt() const {
+ client()->update( ns(), fromjson( "{a:'c'}" ), o_, true );
+ }
+ };
+
+ // $set modifier update replays idempotently.
+ class UpdateSet : public Base {
+ public:
+ UpdateSet() :
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$set:{a:7}}" ) ),
+ ou_( fromjson( "{'_id':1,a:7}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ // $inc modifier update: replaying the logged op must not double-increment.
+ class UpdateInc : public Base {
+ public:
+ UpdateInc() :
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3}}" ) ),
+ ou_( fromjson( "{'_id':1,a:8}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ // $inc combined with $set in one update.
+ class UpdateInc2 : public Base {
+ public:
+ UpdateInc2() :
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3},$set:{x:5}}" ) ),
+ ou_( fromjson( "{'_id':1,a:8,x:5}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ // $inc on dotted paths into embedded docs.
+ class IncEmbedded : public Base {
+ public:
+ IncEmbedded() :
+ o_( fromjson( "{'_id':1,a:{b:3},b:{b:1}}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{$inc:{'a.b':1,'b.b':1}}" ) ),
+ ou_( fromjson( "{'_id':1,a:{b:4},b:{b:2}}" ) )
+ {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ // $inc creates the field when it is missing.
+ class IncCreates : public Base {
+ public:
+ IncCreates() :
+ o_( fromjson( "{'_id':1}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{$inc:{'a':1}}" ) ),
+ ou_( fromjson( "{'_id':1,a:1}") )
+ {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+
+ // Upsert with a $inc modifier and an _id in the query: the inserted doc
+ // takes _id:5 from the query and a:7 from 4 + 3.
+ class UpsertInsertIdMod : public Base {
+ public:
+ UpsertInsertIdMod() :
+ q_( fromjson( "{'_id':5,a:4}" ) ),
+ u_( fromjson( "{$inc:{a:3}}" ) ),
+ ou_( fromjson( "{'_id':5,a:7}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_, true );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ protected:
+ BSONObj q_, u_, ou_;
+ };
+
+ // Upsert with $set that matches nothing inserts {a:7}; the pre-existing
+ // unrelated doc survives.
+ class UpsertInsertSet : public Base {
+ public:
+ UpsertInsertSet() :
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$set:{a:7}}" ) ),
+ ou_( fromjson( "{a:7}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_, true );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ ASSERT( !client()->findOne( ns(), ou_ ).isEmpty() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':7,a:7}" ) );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ // Upsert with $inc that matches nothing inserts {a:8} (5 + 3).
+ class UpsertInsertInc : public Base {
+ public:
+ UpsertInsertInc() :
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3}}" ) ),
+ ou_( fromjson( "{a:8}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_, true );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ ASSERT( !client()->findOne( ns(), ou_ ).isEmpty() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ // Sequence of targeted, untargeted (first match), and multi:true $inc
+ // updates; s() renders the x values in _id order for easy comparison.
+ class MultiInc : public Base {
+ public:
+
+ string s() const {
+ stringstream ss;
+ auto_ptr<DBClientCursor> cc = client()->query( ns() , Query().sort( BSON( "_id" << 1 ) ) );
+ bool first = true;
+ while ( cc->more() ) {
+ if ( first ) first = false;
+ else ss << ",";
+
+ BSONObj o = cc->next();
+ ss << o["x"].numberInt();
+ }
+ return ss.str();
+ }
+
+ void doIt() const {
+ client()->insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
+ client()->insert( ns(), BSON( "_id" << 2 << "x" << 5 ) );
+
+ ASSERT_EQUALS( "1,5" , s() );
+
+ client()->update( ns() , BSON( "_id" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
+ ASSERT_EQUALS( "2,5" , s() );
+
+ // non-multi update increments only one matching doc
+ client()->update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) );
+ ASSERT_EQUALS( "3,5" , s() );
+
+ // multi:true update increments both docs
+ client()->update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , false , true );
+ check();
+ }
+
+ void check() const {
+ ASSERT_EQUALS( "4,6" , s() );
+ }
+
+ void reset() const {
+ deleteAll( ns() );
+ }
+ };
+
+ // Whole-doc update of docs inserted without _id (god-mode inserts skip
+ // _id generation).
+ class UpdateWithoutPreexistingId : public Base {
+ public:
+ UpdateWithoutPreexistingId() :
+ o_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{a:5}" ) ),
+ ot_( fromjson( "{b:4}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), o_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ checkOne( u_ );
+ checkOne( ot_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( ot_, true );
+ insert( o_, true );
+ }
+ protected:
+ BSONObj o_, u_, ot_;
+ };
+
+ // Remove of all docs matching a query.
+ class Remove : public Base {
+ public:
+ Remove() :
+ o1_( f( "{\"_id\":\"010101010101010101010101\",\"a\":\"b\"}" ) ),
+ o2_( f( "{\"_id\":\"010101010101010101010102\",\"a\":\"b\"}" ) ),
+ q_( f( "{\"a\":\"b\"}" ) ) {}
+ void doIt() const {
+ client()->remove( ns(), q_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 0, count() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o1_ );
+ insert( o2_ );
+ }
+ protected:
+ BSONObj o1_, o2_, q_;
+ };
+
+ // justOne remove leaves one of the two matching docs.
+ class RemoveOne : public Remove {
+ void doIt() const {
+ client()->remove( ns(), q_, true );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ }
+ };
+
+ // Update of a missing doc matches nothing; the subsequent insert is what
+ // replays, leaving exactly the original doc.
+ class FailingUpdate : public Base {
+ public:
+ FailingUpdate() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ u_( fromjson( "{'_id':1,c:'d'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), o_, u_ );
+ client()->insert( ns(), o_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( o_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ protected:
+ BSONObj o_, u_;
+ };
+
+ // $set that changes a field's type from number to string.
+ class SetNumToStr : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$set" << BSON( "a" << "bcd" ) ) );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( BSON( "_id" << 0 << "a" << "bcd" ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( BSON( "_id" << 0 << "a" << 4.0 ) );
+ }
+ };
+
+ // $push appends to an existing array: [4] -> [4,5].
+ class Push : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ // $push with upsert:true against an existing doc behaves like Push.
+ class PushUpsert : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) ), true );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ // Two $push modifiers in one update, including a dotted path that creates
+ // the embedded doc b:{c:[6]}.
+ class MultiPush : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) << "$push" << BSON( "b.c" << 6.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5],b:{c:[6]}}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ // $push creates the array when the field is missing.
+ class EmptyPush : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0}" ) );
+ }
+ };
+
+ // $pushAll appends multiple values: [4] -> [4,5,6].
+ class PushAll : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ // $pushAll with upsert:true against an existing doc behaves like PushAll.
+ class PushAllUpsert : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ), true );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ // $pushAll creates the array when the field is missing.
+ class EmptyPushAll : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0}" ) );
+ }
+ };
+
+ // $pull removes matching values: [4,5] -> [5].
+ class Pull : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$pull" << BSON( "a" << 4.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5]}" ) );
+ }
+ };
+
+ // $pull of a value not in the array is a no-op.
+ class PullNothing : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$pull" << BSON( "a" << 6.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5]}" ) );
+ }
+ };
+
+ // $pullAll removes several values at once: [4,5,6] -> [6].
+ class PullAll : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pullAll:{a:[4,5]}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
+ }
+ };
+
+ // $pop:1 removes the last array element: [4,5,6] -> [4,5].
+ class Pop : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pop:{a:1}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
+ }
+ };
+
+ // $pop:-1 removes the first array element: [4,5,6] -> [5,6].
+ class PopReverse : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pop:{a:-1}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
+ }
+ };
+
+ // $bit applies and/or masks: a starts at 3, expected (3 & 2) | 8 == 10.
+ class BitOp : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$bit:{a:{and:2,or:8}}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "a" << ( ( 3 & 2 ) | 8 ) ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:3}" ) );
+ }
+ };
+
+ // $rename a->b followed by $set of a fresh a.
+ class Rename : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "a" << 50 << "b" << 3 ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:3}" ) );
+ }
+ };
+
+ // Same operations as Rename, but b already exists and is replaced by the
+ // renamed value.
+ class RenameReplace : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "a" << 50 << "b" << 3 ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:3,b:100}" ) );
+ }
+ };
+
+ // $rename alone; other fields (z) are untouched.
+ class RenameOverwrite : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "b" << 3 << "z" << 1 ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,z:1,a:3}" ) );
+ }
+ };
+
+ // $rename of a missing field is a no-op; the accompanying $set still
+ // applies.
+ class NoRename : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{c:'b'},$set:{z:1}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "a" << 3 << "z" << 1 ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:3}" ) );
+ }
+ };
+
+
+ } // namespace Idempotence
+
+ // Replayed delete ops target docs by _id, not by the original query
+ // fields: after re-inserting different docs and replaying, only the _ids
+ // deleted originally (0 and 3) are removed again.
+ class DeleteOpIsIdBased : public Base {
+ public:
+ void run() {
+ insert( BSON( "_id" << 0 << "a" << 10 ) );
+ insert( BSON( "_id" << 1 << "a" << 11 ) );
+ insert( BSON( "_id" << 3 << "a" << 10 ) );
+ client()->remove( ns(), BSON( "a" << 10 ) );
+ ASSERT_EQUALS( 1U, client()->count( ns(), BSONObj() ) );
+ insert( BSON( "_id" << 0 << "a" << 11 ) );
+ insert( BSON( "_id" << 2 << "a" << 10 ) );
+ insert( BSON( "_id" << 3 << "a" << 10 ) );
+
+ applyAllOperations();
+ ASSERT_EQUALS( 2U, client()->count( ns(), BSONObj() ) );
+ ASSERT( !one( BSON( "_id" << 1 ) ).isEmpty() );
+ ASSERT( !one( BSON( "_id" << 2 ) ).isEmpty() );
+ }
+ };
+
+ // DatabaseIgnorer basics: ignoreAt() is true per-database up to and
+ // including the doIgnoreUntilAfter() optime, and the ignore state expires
+ // once an optime past the threshold is observed.
+ class DatabaseIgnorerBasic {
+ public:
+ void run() {
+ DatabaseIgnorer d;
+ ASSERT( !d.ignoreAt( "a", OpTime( 4, 0 ) ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 4, 0 ) ) );
+ ASSERT( !d.ignoreAt( "b", OpTime( 4, 0 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 4, 10 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 5, 0 ) ) );
+ ASSERT( !d.ignoreAt( "a", OpTime( 5, 1 ) ) );
+ // Ignore state is expired.
+ ASSERT( !d.ignoreAt( "a", OpTime( 4, 0 ) ) );
+ }
+ };
+
+ // Repeated doIgnoreUntilAfter() calls keep the latest (maximum) threshold
+ // for a database, regardless of call order.
+ class DatabaseIgnorerUpdate {
+ public:
+ void run() {
+ DatabaseIgnorer d;
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 5, 5 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 6, 0 ) ) );
+ ASSERT( !d.ignoreAt( "a", OpTime( 6, 1 ) ) );
+
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 5, 5 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 6, 0 ) ) );
+ ASSERT( !d.ignoreAt( "a", OpTime( 6, 1 ) ) );
+ }
+ };
+
+ /**
+ * Check against oldest document in the oplog before scanning backward
+ * from the newest document.
+ */
+ // With a ts:{$gte:<current>} query whose bound precedes everything in the
+ // oplog, FindingStartCursor is immediately done and positioned at the
+ // oldest entry (the op for _id 0).
+ class FindingStartCursorStale : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ client()->insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( cllNS() );
+ NamespaceDetails *nsd = nsdetails( cllNS() );
+ BSONObjBuilder b;
+ b.appendTimestamp( "$gte" );
+ BSONObj query = BSON( "ts" << b.obj() );
+ FieldRangeSetPair frsp( cllNS(), query );
+ BSONObj order = BSON( "$natural" << 1 );
+ QueryPlan qp( nsd, -1, frsp, &frsp, query, order );
+ FindingStartCursor fsc( qp );
+ ASSERT( fsc.done() );
+ ASSERT_EQUALS( 0, fsc.cursor()->current()[ "o" ].Obj()[ "_id" ].Int() );
+ }
+ };
+
+ /** Check unsuccessful yield recovery with FindingStartCursor */
+ // Invalidating the oplog's client cursors while a FindingStartCursor is
+ // yielded makes recoverFromYield() throw MsgAssertionException.
+ class FindingStartCursorYield : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ client()->insert( ns(), BSON( "_id" << i ) );
+ }
+ // pick the ts of the 5th oplog entry as the scan's lower bound
+ Date_t ts = client()->query( "local.oplog.$main", Query().sort( BSON( "$natural" << 1 ) ), 1, 4 )->next()[ "ts" ].date();
+ Client::Context ctx( cllNS() );
+ NamespaceDetails *nsd = nsdetails( cllNS() );
+ BSONObjBuilder b;
+ b.appendDate( "$gte", ts );
+ BSONObj query = BSON( "ts" << b.obj() );
+ FieldRangeSetPair frsp( cllNS(), query );
+ BSONObj order = BSON( "$natural" << 1 );
+ QueryPlan qp( nsd, -1, frsp, &frsp, query, order );
+ FindingStartCursor fsc( qp );
+ ASSERT( !fsc.done() );
+ fsc.next();
+ ASSERT( !fsc.done() );
+ ASSERT( fsc.prepareToYield() );
+ ClientCursor::invalidate( "local.oplog.$main" );
+ ASSERT_THROWS( fsc.recoverFromYield(), MsgAssertionException );
+ }
+ };
+
+ /** Check ReplSetConfig::MemberCfg equality */
+ class ReplSetMemberCfgEquality : public Base {
+ public:
+ void run() {
+ ReplSetConfig::MemberCfg m1, m2;
+ assert(m1 == m2);
+ m1.tags["x"] = "foo";
+ assert(m1 != m2);
+ m2.tags["y"] = "bar";
+ assert(m1 != m2);
+ m1.tags["y"] = "bar";
+ assert(m1 != m2);
+ m2.tags["x"] = "foo";
+ assert(m1 == m2);
+ m1.tags.clear();
+ assert(m1 != m2);
+ }
+ };
+
+ class SyncTest : public Sync {
+ public:
+ bool returnEmpty;
+ SyncTest() : Sync(""), returnEmpty(false) {}
+ virtual ~SyncTest() {}
+ virtual BSONObj getMissingDoc(const BSONObj& o) {
+ if (returnEmpty) {
+ BSONObj o;
+ return o;
+ }
+ return BSON("_id" << "on remote" << "foo" << "baz");
+ }
+ };
+
+ class ShouldRetry : public Base {
+ public:
+ void run() {
+ bool threw = false;
+ BSONObj o = BSON("ns" << ns() << "o" << BSON("foo" << "bar") << "o2" << BSON("_id" << "in oplog" << "foo" << "bar"));
+
+ // this should fail because we can't connect
+ try {
+ Sync badSource("localhost:123");
+ badSource.getMissingDoc(o);
+ }
+ catch (DBException&) {
+ threw = true;
+ }
+ assert(threw);
+
+ // now this should succeed
+ SyncTest t;
+ assert(t.shouldRetry(o));
+ assert(!client()->findOne(ns(), BSON("_id" << "on remote")).isEmpty());
+
+ // force it not to find an obj
+ t.returnEmpty = true;
+ assert(!t.shouldRetry(o));
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "repl" ) {
+ }
+
+ void setupTests() {
+ add< LogBasic >();
+ add< Idempotence::InsertTimestamp >();
+ add< Idempotence::InsertAutoId >();
+ add< Idempotence::InsertWithId >();
+ add< Idempotence::InsertTwo >();
+ add< Idempotence::InsertTwoIdentical >();
+ add< Idempotence::UpdateTimestamp >();
+ add< Idempotence::UpdateSameField >();
+ add< Idempotence::UpdateSameFieldWithId >();
+ add< Idempotence::UpdateSameFieldExplicitId >();
+ add< Idempotence::UpdateDifferentFieldExplicitId >();
+ add< Idempotence::UpsertUpdateNoMods >();
+ add< Idempotence::UpsertInsertNoMods >();
+ add< Idempotence::UpdateSet >();
+ add< Idempotence::UpdateInc >();
+ add< Idempotence::UpdateInc2 >();
+ add< Idempotence::IncEmbedded >(); // SERVER-716
+ add< Idempotence::IncCreates >(); // SERVER-717
+ add< Idempotence::UpsertInsertIdMod >();
+ add< Idempotence::UpsertInsertSet >();
+ add< Idempotence::UpsertInsertInc >();
+ add< Idempotence::MultiInc >();
+ // Don't worry about this until someone wants this functionality.
+// add< Idempotence::UpdateWithoutPreexistingId >();
+ add< Idempotence::Remove >();
+ add< Idempotence::RemoveOne >();
+ add< Idempotence::FailingUpdate >();
+ add< Idempotence::SetNumToStr >();
+ add< Idempotence::Push >();
+ add< Idempotence::PushUpsert >();
+ add< Idempotence::MultiPush >();
+ add< Idempotence::EmptyPush >();
+ add< Idempotence::PushAll >();
+ add< Idempotence::PushAllUpsert >();
+ add< Idempotence::EmptyPushAll >();
+ add< Idempotence::Pull >();
+ add< Idempotence::PullNothing >();
+ add< Idempotence::PullAll >();
+ add< Idempotence::Pop >();
+ add< Idempotence::PopReverse >();
+ add< Idempotence::BitOp >();
+ add< Idempotence::Rename >();
+ add< Idempotence::RenameReplace >();
+ add< Idempotence::RenameOverwrite >();
+ add< Idempotence::NoRename >();
+ add< DeleteOpIsIdBased >();
+ add< DatabaseIgnorerBasic >();
+ add< DatabaseIgnorerUpdate >();
+ add< FindingStartCursorStale >();
+ add< FindingStartCursorYield >();
+ add< ReplSetMemberCfgEquality >();
+ add< ShouldRetry >();
+ }
+ } myall;
+
+} // namespace ReplTests
+
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
new file mode 100644
index 00000000000..19edd5537ab
--- /dev/null
+++ b/src/mongo/dbtests/sharding.cpp
@@ -0,0 +1,56 @@
+// sharding.cpp : some unit tests for sharding internals
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "dbtests.h"
+
+#include "../client/parallel.h"
+
+namespace ShardingTests {
+
+ namespace serverandquerytests {
+ class test1 {
+ public:
+ void run() {
+ ServerAndQuery a( "foo:1" , BSON( "a" << GT << 0 << LTE << 100 ) );
+ ServerAndQuery b( "foo:1" , BSON( "a" << GT << 200 << LTE << 1000 ) );
+
+ ASSERT( a < b );
+ ASSERT( ! ( b < a ) );
+
+ set<ServerAndQuery> s;
+ s.insert( a );
+ s.insert( b );
+
+ ASSERT_EQUALS( (unsigned int)2 , s.size() );
+ }
+ };
+ }
+
+ class All : public Suite {
+ public:
+ All() : Suite( "sharding" ) {
+ }
+
+ void setupTests() {
+ add< serverandquerytests::test1 >();
+ }
+ } myall;
+
+}
diff --git a/src/mongo/dbtests/socktests.cpp b/src/mongo/dbtests/socktests.cpp
new file mode 100644
index 00000000000..176db8c8e95
--- /dev/null
+++ b/src/mongo/dbtests/socktests.cpp
@@ -0,0 +1,48 @@
+// socktests.cpp : sock.{h,cpp} unit tests.
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../util/net/sock.h"
+#include "dbtests.h"
+
+namespace SockTests {
+
+ class HostByName {
+ public:
+ void run() {
+ ASSERT_EQUALS( "127.0.0.1", hostbyname( "localhost" ) );
+ ASSERT_EQUALS( "127.0.0.1", hostbyname( "127.0.0.1" ) );
+ // ASSERT_EQUALS( "::1", hostbyname( "::1" ) ); // IPv6 disabled at runtime by default.
+
+ HostAndPort h("asdfasdfasdf_no_such_host");
+            // this fails; uncomment when fixed.
+ ASSERT( !h.isSelf() );
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "sock" ) {}
+ void setupTests() {
+ add< HostByName >();
+ }
+ } myall;
+
+} // namespace SockTests
+
diff --git a/src/mongo/dbtests/spin_lock_test.cpp b/src/mongo/dbtests/spin_lock_test.cpp
new file mode 100644
index 00000000000..ed1f1ae1ca5
--- /dev/null
+++ b/src/mongo/dbtests/spin_lock_test.cpp
@@ -0,0 +1,114 @@
+// spin_lock_test.cpp : spin_lock.{h, cpp} unit test
+
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include <boost/thread/thread.hpp>
+#include "dbtests.h"
+#include "../util/concurrency/spin_lock.h"
+#include "../util/timer.h"
+
+namespace {
+
+ using mongo::SpinLock;
+
+ class LockTester {
+ public:
+ LockTester( SpinLock* spin, int* counter )
+ : _spin(spin), _counter(counter), _requests(0) {}
+
+ ~LockTester() {
+ delete _t;
+ }
+
+ void start( int increments ) {
+ _t = new boost::thread( boost::bind(&LockTester::test, this, increments) );
+ }
+
+ void join() {
+ if ( _t ) _t->join();
+ }
+
+ int requests() const {
+ return _requests;
+ }
+
+ private:
+ SpinLock* _spin; // not owned here
+ int* _counter; // not owned here
+ int _requests;
+ boost::thread* _t;
+
+ void test( int increments ) {
+ while ( increments-- > 0 ) {
+ _spin->lock();
+ ++(*_counter);
+ ++_requests;
+ _spin->unlock();
+ }
+ }
+
+ LockTester( LockTester& );
+ LockTester& operator=( LockTester& );
+ };
+
+ class ConcurrentIncs {
+ public:
+ void run() {
+
+ SpinLock spin;
+ int counter = 0;
+
+ const int threads = 64;
+ const int incs = 50000;
+ LockTester* testers[threads];
+
+ Timer timer;
+
+ for ( int i = 0; i < threads; i++ ) {
+ testers[i] = new LockTester( &spin, &counter );
+ }
+ for ( int i = 0; i < threads; i++ ) {
+ testers[i]->start( incs );
+ }
+ for ( int i = 0; i < threads; i++ ) {
+ testers[i]->join();
+ ASSERT_EQUALS( testers[i]->requests(), incs );
+ delete testers[i];
+ }
+
+ int ms = timer.millis();
+ log() << "spinlock ConcurrentIncs time: " << ms << endl;
+
+ ASSERT_EQUALS( counter, threads*incs );
+#if defined(__linux__)
+ ASSERT( SpinLock::isfast() );
+#endif
+
+ }
+ };
+
+ class SpinLockSuite : public Suite {
+ public:
+ SpinLockSuite() : Suite( "spinlock" ) {}
+
+ void setupTests() {
+ add< ConcurrentIncs >();
+ }
+ } spinLockSuite;
+
+} // anonymous namespace
diff --git a/src/mongo/dbtests/test.sln b/src/mongo/dbtests/test.sln
new file mode 100755
index 00000000000..3a1b741c716
--- /dev/null
+++ b/src/mongo/dbtests/test.sln
@@ -0,0 +1,26 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test", "test.vcxproj", "{215B2D68-0A70-4D10-8E75-B33010C62A91}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Win32 = Debug|Win32
+ Debug|x64 = Debug|x64
+ Release|Win32 = Release|Win32
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Win32.ActiveCfg = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Win32.Build.0 = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|x64.ActiveCfg = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|x64.Build.0 = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Win32.ActiveCfg = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Win32.Build.0 = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|x64.ActiveCfg = Release|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|x64.Build.0 = Release|x64
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/src/mongo/dbtests/test.vcxproj b/src/mongo/dbtests/test.vcxproj
new file mode 100644
index 00000000000..c5d1aad61e9
--- /dev/null
+++ b/src/mongo/dbtests/test.vcxproj
@@ -0,0 +1,776 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{215B2D68-0A70-4D10-8E75-B33010C62A91}</ProjectGuid>
+ <RootNamespace>dbtests</RootNamespace>
+ <Keyword>Win32Proj</Keyword>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <MinimalRebuild>No</MinimalRebuild>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <TargetMachine>MachineX86</TargetMachine>
+ <Profile>true</Profile>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_DURABLE;_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp" />
+ <ClInclude Include="..\bson\bson-inl.h" />
+ <ClInclude Include="..\bson\bson.h" />
+ <ClInclude Include="..\bson\bsonelement.h" />
+ <ClInclude Include="..\bson\bsonmisc.h" />
+ <ClInclude Include="..\bson\bsonobj.h" />
+ <ClInclude Include="..\bson\bsonobjbuilder.h" />
+ <ClInclude Include="..\bson\bsonobjiterator.h" />
+ <ClInclude Include="..\bson\bsontypes.h" />
+ <ClInclude Include="..\bson\bson_db.h" />
+ <ClInclude Include="..\bson\inline_decls.h" />
+ <ClInclude Include="..\bson\oid.h" />
+ <ClInclude Include="..\bson\ordering.h" />
+ <ClInclude Include="..\bson\stringdata.h" />
+ <ClInclude Include="..\client\dbclientmockcursor.h" />
+ <ClInclude Include="..\db\collection.h" />
+ <ClInclude Include="..\db\databaseholder.h" />
+ <ClInclude Include="..\db\dur.h" />
+ <ClInclude Include="..\db\durop.h" />
+ <ClInclude Include="..\db\dur_journal.h" />
+ <ClInclude Include="..\db\jsobjmanipulator.h" />
+ <ClInclude Include="..\db\mongommf.h" />
+ <ClInclude Include="..\db\mongomutex.h" />
+ <ClInclude Include="..\db\ops\count.h" />
+ <ClInclude Include="..\db\ops\delete.h" />
+ <ClInclude Include="..\db\ops\query.h" />
+ <ClInclude Include="..\db\ops\update.h" />
+ <ClInclude Include="..\db\pagefault.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcrecpp.h" />
+ <ClInclude Include="..\server.h" />
+ <ClInclude Include="..\targetver.h" />
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp" />
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="..\client\connpool.h" />
+ <ClInclude Include="..\client\dbclient.h" />
+ <ClInclude Include="..\client\model.h" />
+ <ClInclude Include="..\db\btree.h" />
+ <ClInclude Include="..\db\clientcursor.h" />
+ <ClInclude Include="..\db\cmdline.h" />
+ <ClInclude Include="..\db\commands.h" />
+ <ClInclude Include="..\db\concurrency.h" />
+ <ClInclude Include="..\db\curop.h" />
+ <ClInclude Include="..\db\cursor.h" />
+ <ClInclude Include="..\db\database.h" />
+ <ClInclude Include="..\db\db.h" />
+ <ClInclude Include="..\db\dbhelpers.h" />
+ <ClInclude Include="..\db\dbinfo.h" />
+ <ClInclude Include="..\db\dbmessage.h" />
+ <ClInclude Include="..\db\diskloc.h" />
+ <ClInclude Include="..\db\extsort.h" />
+ <ClInclude Include="..\db\introspect.h" />
+ <ClInclude Include="..\db\jsobj.h" />
+ <ClInclude Include="..\db\json.h" />
+ <ClInclude Include="..\db\matcher.h" />
+ <ClInclude Include="..\grid\message.h" />
+ <ClInclude Include="..\db\minilex.h" />
+ <ClInclude Include="..\db\namespace.h" />
+ <ClInclude Include="..\pch.h" />
+ <ClInclude Include="..\db\pdfile.h" />
+ <ClInclude Include="..\grid\protocol.h" />
+ <ClInclude Include="..\db\query.h" />
+ <ClInclude Include="..\db\queryoptimizer.h" />
+ <ClInclude Include="..\db\repl.h" />
+ <ClInclude Include="..\db\replset.h" />
+ <ClInclude Include="..\db\resource.h" />
+ <ClInclude Include="..\db\scanandorder.h" />
+ <ClInclude Include="..\db\security.h" />
+ <ClInclude Include="..\third_party\snappy\config.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-c.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-internal.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-sinksource.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-internal.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-public.h" />
+ <ClInclude Include="..\third_party\snappy\snappy.h" />
+ <ClInclude Include="..\util\builder.h" />
+ <ClInclude Include="..\util\checksum.h" />
+ <ClInclude Include="..\util\compress.h" />
+ <ClInclude Include="..\util\concurrency\list.h" />
+ <ClInclude Include="..\util\concurrency\task.h" />
+ <ClInclude Include="..\util\concurrency\value.h" />
+ <ClInclude Include="..\util\file.h" />
+ <ClInclude Include="..\util\goodies.h" />
+ <ClInclude Include="..\util\hashtab.h" />
+ <ClInclude Include="..\db\lasterror.h" />
+ <ClInclude Include="..\util\log.h" />
+ <ClInclude Include="..\util\logfile.h" />
+ <ClInclude Include="..\util\lruishmap.h" />
+ <ClInclude Include="..\util\md5.h" />
+ <ClInclude Include="..\util\md5.hpp" />
+ <ClInclude Include="..\util\miniwebserver.h" />
+ <ClInclude Include="..\util\mmap.h" />
+ <ClInclude Include="..\util\mongoutils\hash.h" />
+ <ClInclude Include="..\util\sock.h" />
+ <ClInclude Include="..\util\unittest.h" />
+ <ClInclude Include="framework.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\bson\oid.cpp" />
+ <ClCompile Include="..\client\dbclientcursor.cpp" />
+ <ClCompile Include="..\client\dbclient_rs.cpp" />
+ <ClCompile Include="..\client\distlock.cpp" />
+ <ClCompile Include="..\client\gridfs.cpp" />
+ <ClCompile Include="..\client\model.cpp" />
+ <ClCompile Include="..\client\parallel.cpp" />
+ <ClCompile Include="..\db\btreebuilder.cpp" />
+ <ClCompile Include="..\db\cap.cpp" />
+ <ClCompile Include="..\db\commands\isself.cpp" />
+ <ClCompile Include="..\db\compact.cpp" />
+ <ClCompile Include="..\db\curop.cpp" />
+ <ClCompile Include="..\db\dbcommands_admin.cpp" />
+ <ClCompile Include="..\db\dbcommands_generic.cpp" />
+ <ClCompile Include="..\db\dur.cpp" />
+ <ClCompile Include="..\db\durop.cpp" />
+ <ClCompile Include="..\db\dur_commitjob.cpp" />
+ <ClCompile Include="..\db\dur_journal.cpp" />
+ <ClCompile Include="..\db\dur_preplogbuffer.cpp" />
+ <ClCompile Include="..\db\dur_recover.cpp" />
+ <ClCompile Include="..\db\dur_writetodatafiles.cpp" />
+ <ClCompile Include="..\db\d_concurrency.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Use</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\d_globals.cpp" />
+ <ClCompile Include="..\db\geo\2d.cpp" />
+ <ClCompile Include="..\db\geo\haystack.cpp" />
+ <ClCompile Include="..\db\key.cpp" />
+ <ClCompile Include="..\db\mongommf.cpp" />
+ <ClCompile Include="..\db\ops\count.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\delete.cpp" />
+ <ClCompile Include="..\db\ops\query.cpp" />
+ <ClCompile Include="..\db\ops\update.cpp" />
+ <ClCompile Include="..\db\pagefault.cpp" />
+ <ClCompile Include="..\db\projection.cpp" />
+ <ClCompile Include="..\db\queryoptimizercursor.cpp" />
+ <ClCompile Include="..\db\querypattern.cpp">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\db\record.cpp" />
+ <ClCompile Include="..\db\repl\consensus.cpp" />
+ <ClCompile Include="..\db\repl\heartbeat.cpp" />
+ <ClCompile Include="..\db\repl\manager.cpp" />
+ <ClCompile Include="..\db\repl\rs.cpp" />
+ <ClCompile Include="..\db\repl\rs_initialsync.cpp" />
+ <ClCompile Include="..\db\repl\rs_initiate.cpp" />
+ <ClCompile Include="..\db\repl\rs_rollback.cpp" />
+ <ClCompile Include="..\db\repl\rs_sync.cpp" />
+ <ClCompile Include="..\db\restapi.cpp" />
+ <ClCompile Include="..\db\scanandorder.cpp" />
+ <ClCompile Include="..\db\security_common.cpp" />
+ <ClCompile Include="..\s\default_version.cpp" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\client\connpool.cpp" />
+ <ClCompile Include="..\client\dbclient.cpp" />
+ <ClCompile Include="..\client\syncclusterconnection.cpp" />
+ <ClCompile Include="..\db\btree.cpp" />
+ <ClCompile Include="..\db\btreecursor.cpp" />
+ <ClCompile Include="..\pch.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\client.cpp" />
+ <ClCompile Include="..\db\clientcursor.cpp" />
+ <ClCompile Include="..\db\cloner.cpp" />
+ <ClCompile Include="..\db\commands\cloud.cpp">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\db\commands.cpp" />
+ <ClCompile Include="..\db\common.cpp">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\cursor.cpp" />
+ <ClCompile Include="..\db\database.cpp" />
+ <ClCompile Include="..\db\dbcommands.cpp" />
+ <ClCompile Include="..\db\dbeval.cpp" />
+ <ClCompile Include="..\db\dbhelpers.cpp" />
+ <ClCompile Include="..\db\dbwebserver.cpp" />
+ <ClCompile Include="..\db\extsort.cpp" />
+ <ClCompile Include="..\db\index.cpp" />
+ <ClCompile Include="..\db\indexkey.cpp" />
+ <ClCompile Include="..\db\instance.cpp" />
+ <ClCompile Include="..\db\introspect.cpp" />
+ <ClCompile Include="..\db\jsobj.cpp" />
+ <ClCompile Include="..\db\json.cpp" />
+ <ClCompile Include="..\db\lasterror.cpp" />
+ <ClCompile Include="..\db\matcher.cpp" />
+ <ClCompile Include="..\scripting\bench.cpp" />
+ <ClCompile Include="..\s\chunk.cpp" />
+ <ClCompile Include="..\s\config.cpp" />
+ <ClCompile Include="..\s\d_chunk_manager.cpp" />
+ <ClCompile Include="..\s\d_migrate.cpp" />
+ <ClCompile Include="..\s\d_split.cpp" />
+ <ClCompile Include="..\s\d_state.cpp" />
+ <ClCompile Include="..\s\d_writeback.cpp" />
+ <ClCompile Include="..\s\grid.cpp" />
+ <ClCompile Include="..\s\shard.cpp" />
+ <ClCompile Include="..\s\shardconnection.cpp" />
+ <ClCompile Include="..\s\shardkey.cpp" />
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\compress.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\synchronization.cpp" />
+ <ClCompile Include="..\util\concurrency\task.cpp" />
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
+ <ClCompile Include="..\util\concurrency\vars.cpp" />
+ <ClCompile Include="..\util\file_allocator.cpp" />
+ <ClCompile Include="..\util\log.cpp" />
+ <ClCompile Include="..\util\logfile.cpp" />
+ <ClCompile Include="..\util\mmap_win.cpp" />
+ <ClCompile Include="..\db\namespace.cpp" />
+ <ClCompile Include="..\db\nonce.cpp" />
+ <ClCompile Include="..\db\pdfile.cpp" />
+ <ClCompile Include="..\db\queryoptimizer.cpp" />
+ <ClCompile Include="..\util\processinfo.cpp" />
+ <ClCompile Include="..\db\repl.cpp" />
+ <ClCompile Include="..\db\security.cpp" />
+ <ClCompile Include="..\db\security_commands.cpp" />
+ <ClCompile Include="..\db\tests.cpp" />
+ <ClCompile Include="..\db\cmdline.cpp" />
+ <ClCompile Include="..\db\dbmessage.cpp" />
+ <ClCompile Include="..\db\matcher_covered.cpp" />
+ <ClCompile Include="..\db\oplog.cpp" />
+ <ClCompile Include="..\db\queryutil.cpp" />
+ <ClCompile Include="..\db\repl_block.cpp" />
+ <ClCompile Include="..\util\assert_util.cpp" />
+ <ClCompile Include="..\util\background.cpp" />
+ <ClCompile Include="..\util\base64.cpp" />
+ <ClCompile Include="..\util\md5.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeaderFile>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeaderFile>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp" />
+ <ClCompile Include="..\util\net\message.cpp" />
+ <ClCompile Include="..\util\net\listen.cpp" />
+ <ClCompile Include="..\util\net\message_server_port.cpp" />
+ <ClCompile Include="..\util\net\message_port.cpp" />
+ <ClCompile Include="..\util\net\miniwebserver.cpp" />
+ <ClCompile Include="..\util\mmap.cpp" />
+ <ClCompile Include="..\util\processinfo_win32.cpp" />
+ <ClCompile Include="..\util\ramlog.cpp" />
+ <ClCompile Include="..\util\net\sock.cpp" />
+ <ClCompile Include="..\util\stringutils.cpp" />
+ <ClCompile Include="..\util\text.cpp" />
+ <ClCompile Include="..\util\util.cpp" />
+ <ClCompile Include="..\s\d_logic.cpp" />
+ <ClCompile Include="..\scripting\engine.cpp" />
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
+ <ClCompile Include="..\shell\mongo.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\scripting\utils.cpp" />
+ <ClCompile Include="..\util\version.cpp" />
+ <ClCompile Include="basictests.cpp" />
+ <ClCompile Include="btreetests.cpp" />
+ <ClCompile Include="clienttests.cpp" />
+ <ClCompile Include="cursortests.cpp" />
+ <ClCompile Include="dbtests.cpp" />
+ <ClCompile Include="directclienttests.cpp" />
+ <ClCompile Include="d_chunk_manager_tests.cpp" />
+ <ClCompile Include="framework.cpp" />
+ <ClCompile Include="jsobjtests.cpp" />
+ <ClCompile Include="jsontests.cpp" />
+ <ClCompile Include="jstests.cpp" />
+ <ClCompile Include="matchertests.cpp" />
+ <ClCompile Include="mmaptests.cpp" />
+ <ClCompile Include="namespacetests.cpp" />
+ <ClCompile Include="pdfiletests.cpp" />
+ <ClCompile Include="perftests.cpp" />
+ <ClCompile Include="queryoptimizercursortests.cpp" />
+ <ClCompile Include="queryoptimizertests.cpp" />
+ <ClCompile Include="querytests.cpp" />
+ <ClCompile Include="repltests.cpp" />
+ <ClCompile Include="socktests.cpp" />
+ <ClCompile Include="spin_lock_test.cpp" />
+ <ClCompile Include="threadedtests.cpp">
+ <DisableSpecificWarnings Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">4180;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <DisableSpecificWarnings Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">4180;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ </ClCompile>
+ <ClCompile Include="updatetests.cpp" />
+ <ClCompile Include="..\db\stats\counters.cpp" />
+ <ClCompile Include="..\db\stats\snapshots.cpp" />
+ <ClCompile Include="..\db\stats\top.cpp" />
+ <ClCompile Include="..\db\repl\health.cpp" />
+ <ClCompile Include="..\db\repl\replset_commands.cpp" />
+ <ClCompile Include="..\db\repl\rs_config.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\SConstruct" />
+ <None Include="btreetests.inl" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js32d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js32r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/dbtests/test.vcxproj.filters b/src/mongo/dbtests/test.vcxproj.filters
new file mode 100755
index 00000000000..a692d0ca692
--- /dev/null
+++ b/src/mongo/dbtests/test.vcxproj.filters
@@ -0,0 +1,939 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="misc and third party">
+ <UniqueIdentifier>{17c97725-06a4-41a6-bc1c-f0e05eada682}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="misc and third party">
+ <UniqueIdentifier>{0a50fb63-4ac3-4e30-a9d4-b0841878ee73}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="client">
+ <UniqueIdentifier>{45dab36c-864e-45de-bb8e-cf1d87a2c4f6}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="db">
+ <UniqueIdentifier>{69e233b0-5354-4612-8474-d4e4faaee607}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="db\cpp">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="db\h">
+ <UniqueIdentifier>{f86d2fc9-fb76-40cf-943d-330feb945ff3}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="util">
+ <UniqueIdentifier>{0ec2e082-aace-46da-9898-a1a7b24d60b7}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="util\cpp">
+ <UniqueIdentifier>{12efa241-3593-4177-a0cb-1eb672491f49}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="shard">
+ <UniqueIdentifier>{3865c5a5-bdb1-4420-a3ae-5a6615d563d4}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="scripting">
+ <UniqueIdentifier>{28893dc5-8a18-429a-b5c9-2cf701d324da}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="dbtests">
+ <UniqueIdentifier>{bc08b47a-daa3-4894-b9af-ae88755838db}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="stats">
+ <UniqueIdentifier>{2b914dc3-a760-4397-a12b-73a0381fa71d}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="replsets">
+ <UniqueIdentifier>{9320a670-3b28-471a-bf92-6c8d881a37a4}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="util\concurrency">
+ <UniqueIdentifier>{d499fdba-b256-4b12-af20-cdd1ae1addff}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="util\h">
+ <UniqueIdentifier>{353b6f01-1cab-4156-a576-bc75ab204776}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="btree">
+ <UniqueIdentifier>{4fff2dbf-30c4-4295-8db8-d513c1e36220}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="dur">
+ <UniqueIdentifier>{c296d097-0d46-46ee-9097-f2df659d9596}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="bson">
+ <UniqueIdentifier>{e6652333-c77f-420c-af8e-72d55bc095fe}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="misc and third party\snappy">
+ <UniqueIdentifier>{fbc4416f-ca67-4e63-a1ea-49027de7e080}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp">
+ <Filter>misc and third party</Filter>
+ </ClInclude>
+ <ClInclude Include="..\targetver.h">
+ <Filter>misc and third party</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp">
+ <Filter>misc and third party</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\connpool.h">
+ <Filter>client</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\dbclient.h">
+ <Filter>client</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\model.h">
+ <Filter>client</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\clientcursor.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\cmdline.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\commands.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\concurrency.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\curop.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\cursor.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\database.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\db.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dbhelpers.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dbinfo.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dbmessage.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\diskloc.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\extsort.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\introspect.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\jsobj.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\json.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\matcher.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\grid\message.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\minilex.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\namespace.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\pch.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pdfile.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\grid\protocol.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\query.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\queryoptimizer.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\repl.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\replset.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\resource.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\scanandorder.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\security.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\btree.h">
+ <Filter>btree</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\concurrency\list.h">
+ <Filter>util\concurrency</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\concurrency\value.h">
+ <Filter>util\concurrency</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\concurrency\task.h">
+ <Filter>util\concurrency</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\builder.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\unittest.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\file.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\goodies.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\hashtab.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\lasterror.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\log.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\lruishmap.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\md5.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\md5.hpp">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\miniwebserver.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\mmap.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\sock.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dur.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dur_journal.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\logfile.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\mongommf.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\durop.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\jsobjmanipulator.h">
+ <Filter>db</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\mongomutex.h">
+ <Filter>db</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\mongoutils\hash.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\checksum.h">
+ <Filter>util</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bson.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bson_db.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonelement.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bson-inl.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonmisc.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonobj.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonobjbuilder.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonobjiterator.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsontypes.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\inline_decls.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\oid.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\ordering.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\stringdata.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\ops\delete.h">
+ <Filter>db\cpp</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\ops\update.h">
+ <Filter>db\cpp</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\ops\query.h">
+ <Filter>db\cpp</Filter>
+ </ClInclude>
+ <ClInclude Include="..\server.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\config.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-c.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-internal.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-sinksource.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-internal.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-public.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\compress.h">
+ <Filter>misc and third party</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\pcre-7.4\pcrecpp.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="..\db\collection.h" />
+ <ClInclude Include="..\db\databaseholder.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="framework.h" />
+ <ClInclude Include="..\db\ops\count.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\dbclientmockcursor.h" />
+ <ClInclude Include="..\db\pagefault.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js64r.lib">
+ <Filter>misc and third party</Filter>
+ </Library>
+ <Library Include="..\..\js\js32d.lib">
+ <Filter>misc and third party</Filter>
+ </Library>
+ <Library Include="..\..\js\js32r.lib">
+ <Filter>misc and third party</Filter>
+ </Library>
+ <Library Include="..\..\js\js64d.lib">
+ <Filter>misc and third party</Filter>
+ </Library>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\client\connpool.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclient.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclientcursor.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\syncclusterconnection.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\pch.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\client.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\clientcursor.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\cloner.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\commands.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\common.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\cursor.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\database.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbcommands.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbeval.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbhelpers.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbwebserver.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\extsort.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\index.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\indexkey.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\instance.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\introspect.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\jsobj.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\json.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\lasterror.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\matcher.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\mmap_win.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\namespace.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\nonce.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pdfile.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\queryoptimizer.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\security.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\security_commands.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\tests.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\cmdline.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\matcher_covered.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\oplog.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\queryutil.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl_block.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\assert_util.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\background.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\base64.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\md5.c">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\mmap.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\processinfo_win32.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\util.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_logic.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\engine.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\shell\mongo.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\utils.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="basictests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="btreetests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="clienttests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="cursortests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="dbtests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="framework.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="jsobjtests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="jsontests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="jstests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="matchertests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="namespacetests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="pdfiletests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="queryoptimizertests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="querytests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="repltests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="socktests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="threadedtests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="updatetests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\stats\counters.cpp">
+ <Filter>stats</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\stats\snapshots.cpp">
+ <Filter>stats</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\stats\top.cpp">
+ <Filter>stats</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\consensus.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\health.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\replset_commands.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_config.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\btree.cpp">
+ <Filter>btree</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\btreecursor.cpp">
+ <Filter>btree</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\manager.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_initiate.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\vars.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\task.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\heartbeat.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\shardconnection.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\version.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\text.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\gridfs.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_writeback.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_state.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\geo\2d.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\chunk.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\config.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\shardkey.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\shard.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\model.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\parallel.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\stringutils.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\distlock.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_migrate.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_split.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_rollback.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_sync.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_initialsync.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\geo\haystack.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\cap.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\log.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\processinfo.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\grid.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\restapi.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="mmaptests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\bench.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\compact.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\commands\isself.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_journal.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\logfile.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\mongommf.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\projection.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_chunk_manager.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_recover.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\durop.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbcommands_generic.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\bson\oid.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\synchronization.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_commitjob.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_writetodatafiles.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclient_rs.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_preplogbuffer.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="perftests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="directclienttests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\file_allocator.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbcommands_admin.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\querypattern.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\ramlog.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\key.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\btreebuilder.cpp">
+ <Filter>btree</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\queryoptimizercursor.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\record.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\delete.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\update.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\security_common.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\query.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbmessage.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\listen.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message_server_port.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message_port.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\miniwebserver.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\sock.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="spin_lock_test.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <Filter>misc and third party\snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\compress.cpp">
+ <Filter>misc and third party</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <Filter>misc and third party\snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\scanandorder.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\commands\cloud.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\d_concurrency.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="d_chunk_manager_tests.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="queryoptimizercursortests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\default_version.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\count.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pagefault.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\d_globals.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\curop.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\SConstruct">
+ <Filter>misc and third party</Filter>
+ </None>
+ <None Include="btreetests.inl">
+ <Filter>dbtests</Filter>
+ </None>
+ </ItemGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
new file mode 100644
index 00000000000..1304a276b7d
--- /dev/null
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -0,0 +1,649 @@
+// threadedtests.cpp - Tests for threaded code
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../bson/util/atomic_int.h"
+#include "../util/concurrency/mvar.h"
+#include "../util/concurrency/thread_pool.h"
+#include "../util/concurrency/list.h"
+#include "../util/timer.h"
+#include <boost/thread.hpp>
+#include <boost/bind.hpp>
+#include "../db/d_concurrency.h"
+
+#include "dbtests.h"
+
+namespace ThreadedTests {
+
+    /**
+     * Harness that runs a test body concurrently on nthreads_param threads.
+     * Derived tests implement subthread() (per-thread work) and validate()
+     * (post-run checks); setup() is optional initialization.
+     */
+    template <int nthreads_param=10>
+    class ThreadedTest {
+    public:
+        virtual void setup() {}                    // optional per-test initialization
+        virtual void subthread(int remaining) = 0; // work performed by each spawned thread
+        virtual void validate() = 0;               // checks run after all threads finish
+
+        static const int nthreads = nthreads_param;
+
+        // Template method: prepare, fan out the worker threads, then verify.
+        void run() {
+            setup();
+            launch_subthreads(nthreads);
+            validate();
+        }
+
+        virtual ~ThreadedTest() {}; // virtual so derived tests destroy cleanly
+
+    private:
+        // Recursively spawns one thread per remaining count; each stack frame
+        // joins its own thread on the way back up, so run() returns only after
+        // every worker has completed.
+        void launch_subthreads(int remaining) {
+            if (remaining == 0)
+                return;
+
+            boost::thread worker(boost::bind(&ThreadedTest::subthread, this, remaining));
+            launch_subthreads(remaining - 1);
+            worker.join();
+        }
+    };
+
+    /**
+     * Stress test for MongoMutex (the global database lock): 135 threads each
+     * cycle N times through several shared/exclusive/recursive/try acquisition
+     * patterns.  Failure shows up as a deadlock, a tripped ASSERT, or lock
+     * state left inconsistent at validate() time.
+     */
+    class MongoMutexTest : public ThreadedTest<135> {
+#if defined(_DEBUG)
+        enum { N = 5000 };   // debug builds are much slower; use fewer iterations
+#else
+        enum { N = 40000 };
+#endif
+        MongoMutex *mm;      // the global lock under test (set in setup())
+        ProgressMeter pm;    // tracks total iterations across all threads
+    public:
+        MongoMutexTest() : pm(N * nthreads) {}
+        void run() {
+            DEV {
+                // in _DEBUG builds on linux we mprotect each time a writelock
+                // is taken. That can greatly slow down this test if there are
+                // many open files
+                DBDirectClient db;
+                db.simpleCommand("admin", NULL, "closeAllDatabases");
+            }
+
+            Timer t;
+            cout << "MongoMutexTest N:" << N << endl;
+            ThreadedTest<135>::run();
+            cout << "MongoMutexTest " << t.millis() << "ms" << endl;
+        }
+    private:
+        virtual void setup() {
+            mm = &d.dbMutex;
+        }
+        virtual void subthread(int) {
+            Client::initThread("mongomutextest");
+            sleepmillis(0);
+            // i % 7 selects one of the usage patterns below so that different
+            // patterns interleave across the threads.
+            for( int i = 0; i < N; i++ ) {
+                if( i % 7 == 0 ) {
+                    // recursive shared acquisition
+                    mm->lock_shared();
+                    mm->lock_shared();
+                    mm->unlock_shared();
+                    mm->unlock_shared();
+                }
+                else if( i % 7 == 1 ) {
+                    // plain shared lock; state must report at-least-read-locked
+                    mm->lock_shared();
+                    ASSERT( mm->atLeastReadLocked() );
+                    mm->unlock_shared();
+                }
+                else if( i % 7 == 2 ) {
+                    // plain exclusive lock
+                    mm->lock();
+                    ASSERT( mm->isWriteLocked() );
+                    mm->unlock();
+                }
+                else if( i % 7 == 3 ) {
+                    // shared taken recursively while already write-locked
+                    mm->lock();
+                    mm->lock_shared();
+                    ASSERT( mm->isWriteLocked() );
+                    mm->unlock_shared();
+                    mm->unlock();
+                }
+                else if( i % 7 == 4 ) {
+                    // exercises the releaseEarly() path before unlock()
+                    mm->lock();
+                    mm->releaseEarly();
+                    mm->unlock();
+                }
+                else if( i % 7 == 5 ) {
+                    // timed exclusive try-lock (1ms); unlock only on success
+                    if( mm->lock_try(1) ) {
+                        mm->unlock();
+                    }
+                }
+                else if( i % 7 == 6 ) {
+                    // non-blocking shared try-lock
+                    if( mm->lock_shared_try(0) ) {
+                        mm->unlock_shared();
+                    }
+                }
+                else {
+                    mm->lock_shared();
+                    mm->unlock_shared();
+                }
+                pm.hit();
+            }
+            cc().shutdown();
+        }
+        virtual void validate() {
+            // after all threads finish, nothing may still hold the mutex and
+            // both lock modes must still be acquirable
+            ASSERT( !mm->atLeastReadLocked() );
+            mm->lock();
+            mm->unlock();
+            mm->lock_shared();
+            mm->unlock_shared();
+        }
+    };
+
+    // Tested with up to 30k threads
+    class IsAtomicUIntAtomic : public ThreadedTest<> {
+        static const int iterations = 1000000;
+        AtomicUInt target;
+
+        // Hammer the shared counter from every thread; no increment may be lost.
+        void subthread(int) {
+            int n = iterations;
+            while (n--) {
+                //target.x++; // verified to fail with this version
+                target++;
+            }
+        }
+        void validate() {
+            // every increment from every thread must be accounted for
+            ASSERT_EQUALS(target.x , unsigned(nthreads * iterations));
+
+            // single-threaded sanity checks of the operator semantics:
+            AtomicUInt u;
+            ASSERT_EQUALS(0u, u);
+            ASSERT_EQUALS(0u, u++);   // post-increment yields the old value
+            ASSERT_EQUALS(2u, ++u);   // pre-increment yields the new value
+            ASSERT_EQUALS(2u, u--);   // post-decrement yields the old value
+            ASSERT_EQUALS(0u, --u);
+            ASSERT_EQUALS(0u, u);
+
+            u++;
+            ASSERT( u > 0 );
+
+            u--;
+            ASSERT( ! ( u > 0 ) );
+        }
+    };
+
+    class MVarTest : public ThreadedTest<> {
+        static const int iterations = 10000;
+        MVar<int> target;
+
+    public:
+        MVarTest() : target(0) {}
+        // Each thread repeatedly takes the value, yields to widen the race
+        // window, and puts back value+1; the MVar must serialize the updates.
+        void subthread(int) {
+            for (int i = 0; i < iterations; ++i) {
+                const int val = target.take();
+#if BOOST_VERSION >= 103500
+                //increase chances of catching failure
+                boost::this_thread::yield();
+#endif
+                target.put(val + 1);
+            }
+        }
+        void validate() {
+            // no increment may be lost across all the threads
+            ASSERT_EQUALS(target.take() , nthreads * iterations);
+        }
+    };
+
+    class ThreadPoolTest {
+        static const int iterations = 10000;
+        static const int nThreads = 8;
+
+        AtomicUInt counter;
+        // Adds n to the shared counter, one atomic bump at a time.
+        void increment(int n) {
+            while (n-- > 0)
+                counter++;
+        }
+
+    public:
+        void run() {
+            ThreadPool tp(nThreads);
+
+            // queue many small tasks; the pool must execute every one of them
+            for (int i = 0; i < iterations; ++i)
+                tp.schedule(&ThreadPoolTest::increment, this, 2);
+
+            tp.join(); // wait for the queue to drain
+
+            ASSERT(counter == (unsigned)(iterations * 2));
+        }
+    };
+
+    class LockTest {
+    public:
+        void run() {
+            // quick atomicint wrap test
+            // MSGID likely assumes this semantic
+            AtomicUInt counter = 0xffffffff;
+            counter++;
+            ASSERT( counter == 0 );   // unsigned wrap-around back to zero
+
+            // a zero-timeout write-lock try is expected to acquire immediately
+            writelocktry lk( "" , 0 );
+            ASSERT( lk.got() );
+            ASSERT( d.dbMutex.isWriteLocked() );
+        }
+    };
+
+    class RWLockTest1 {
+    public:
+        // smoke test: construct a named RWLock and take/release one timed
+        // (1000ms) write lock via the rwlock RAII helper
+        void run() {
+            RWLock lk( "eliot" );
+            {
+                rwlock r( lk , true , 1000 );
+            }
+        }
+    };
+
+    /**
+     * Nongreedy recursive rwlock scenario (currently disabled in the suite,
+     * see SERVER-2996): while the main thread holds the lock shared, a writer
+     * must block but an additional reader must still get through.
+     */
+    class RWLockTest2 {
+    public:
+
+        // Requests the lock exclusively; blocks until the main thread's
+        // shared hold is released.  x steps 1 -> 2 around the acquisition so
+        // the main thread can observe progress.
+        static void worker1( RWLockRecursiveNongreedy * lk , AtomicUInt * x ) {
+            (*x)++; // 1
+            //cout << "lock b try" << endl;
+            RWLockRecursiveNongreedy::Exclusive b(*lk);
+            //cout << "lock b got" << endl;
+            (*x)++; // 2
+        }
+
+        // Takes the lock shared; with a nongreedy lock this must succeed even
+        // while worker1 is queued for exclusive access.
+        static void worker2( RWLockRecursiveNongreedy * lk , AtomicUInt * x ) {
+            //cout << "lock c try" << endl;
+            RWLockRecursiveNongreedy::Shared c(*lk);
+            (*x)++;
+            //cout << "lock c got" << endl;
+        }
+
+        void run() {
+            /**
+             * note: this test will deadlock if the code breaks
+             */
+
+            RWLockRecursiveNongreedy lk( "eliot2" , 120 * 1000 );
+            cout << "RWLock impl: " << lk.implType() << endl;
+
+            // hold the lock shared so the writer below must wait
+            auto_ptr<RWLockRecursiveNongreedy::Shared> a( new RWLockRecursiveNongreedy::Shared(lk) );
+
+            AtomicUInt x1 = 0;
+            cout << "A : " << &x1 << endl;
+            boost::thread t1( boost::bind( worker1 , &lk , &x1 ) );
+            while ( ! x1 );       // spin until worker1 has started
+            assert( x1 == 1 );
+            sleepmillis( 500 );
+            assert( x1 == 1 );    // still 1: writer must be blocked by our shared hold
+
+            AtomicUInt x2 = 0;
+
+            boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
+            t2.join();
+            assert( x2 == 1 );    // reader succeeded despite the queued writer
+
+            a.reset();            // release our shared hold; worker1 may now proceed
+
+            // give worker1 up to ~2s to acquire the exclusive lock and bump x1
+            for ( int i=0; i<2000; i++ ) {
+                if ( x1 == 2 )
+                    break;
+                sleepmillis(1);
+            }
+
+            assert( x1 == 2 );
+            t1.join();
+
+        }
+    };
+
+
+
+    /** test of shared lock */
+    class RWLockTest3 {
+    public:
+
+        // With the main thread holding the lock shared: an exclusive try-lock
+        // must fail, but a second shared acquisition must succeed.
+        static void worker2( RWLockRecursiveNongreedy * lk , AtomicUInt * x ) {
+            assert( ! lk->__lock_try(0) );   // exclusive try must not succeed
+            //cout << "lock c try" << endl;
+            RWLockRecursiveNongreedy::Shared c( *lk );
+            (*x)++;
+            //cout << "lock c got" << endl;
+        }
+
+        void run() {
+            /**
+             * note: this test will deadlock if the code breaks
+             */
+
+            RWLockRecursiveNongreedy lk( "eliot2" , 120 * 1000 );
+
+            // hold the lock shared for the duration of worker2
+            auto_ptr<RWLockRecursiveNongreedy::Shared> a( new RWLockRecursiveNongreedy::Shared( lk ) );
+
+            AtomicUInt x2 = 0;
+
+            boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
+            t2.join();
+            assert( x2 == 1 );
+
+            a.reset();
+
+        }
+    };
+
+    /**
+     * Same reader/writer scenario as RWLockTest2 but driven directly against
+     * the platform pthread_rwlock_t (POSIX only; a no-op elsewhere): while a
+     * reader holds the lock a writer must block, and an additional reader
+     * must still get through.
+     */
+    class RWLockTest4 {
+    public:
+
+#if defined(__linux__) || defined(__APPLE__)
+        // Polls trywrlock until the write lock is obtained; x steps 1 -> 2
+        // around the acquisition so the main thread can watch progress.
+        static void worker1( pthread_rwlock_t * lk , AtomicUInt * x ) {
+            (*x)++; // 1
+            cout << "lock b try" << endl;
+            while ( 1 ) {
+                if ( pthread_rwlock_trywrlock( lk ) == 0 )
+                    break;
+                sleepmillis(10);
+            }
+            cout << "lock b got" << endl;
+            (*x)++; // 2
+            pthread_rwlock_unlock( lk );
+        }
+
+        // Acquires a read lock; must succeed while the main thread also reads.
+        static void worker2( pthread_rwlock_t * lk , AtomicUInt * x ) {
+            cout << "lock c try" << endl;
+            pthread_rwlock_rdlock( lk );
+            (*x)++;
+            cout << "lock c got" << endl;
+            pthread_rwlock_unlock( lk );
+        }
+#endif
+        void run() {
+            /**
+             * note: this test will deadlock if the code breaks
+             */
+
+#if defined(__linux__) || defined(__APPLE__)
+
+            // create
+            pthread_rwlock_t lk;
+            assert( pthread_rwlock_init( &lk , 0 ) == 0 );
+
+            // read lock
+            assert( pthread_rwlock_rdlock( &lk ) == 0 );
+
+            AtomicUInt x1 = 0;
+            boost::thread t1( boost::bind( worker1 , &lk , &x1 ) );
+            while ( ! x1 );      // spin until worker1 has started
+            assert( x1 == 1 );
+            sleepmillis( 500 );
+            assert( x1 == 1 );   // writer still blocked by our read lock
+
+            AtomicUInt x2 = 0;
+
+            boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
+            t2.join();
+            assert( x2 == 1 );   // second reader got through
+
+            pthread_rwlock_unlock( &lk );
+
+            // give the writer up to ~2s to acquire the lock
+            for ( int i=0; i<2000; i++ ) {
+                if ( x1 == 2 )
+                    break;
+                sleepmillis(1);
+            }
+
+            assert( x1 == 2 );
+            t1.join();
+
+            // release the lock's resources now that every user is done
+            // (previously the initialized rwlock was never destroyed)
+            assert( pthread_rwlock_destroy( &lk ) == 0 );
+#endif
+        }
+    };
+
+    /**
+     * Concurrency fuzz of List1: threads randomly push, traverse, orphan one
+     * element, or orphan everything; a concurrent traversal must never observe
+     * a node whose payload was not set by push().
+     */
+    class List1Test2 : public ThreadedTest<> {
+        static const int iterations = 1000; // note: a lot of iterations will use a lot of memory as List1 leaks on purpose
+        class M : public List1<M>::Base {
+        public:
+            M(int x) : _x(x) { }
+            const int _x;
+        };
+        List1<M> l;
+    public:
+        void validate() { }
+        void subthread(int) {
+            for(int i=0; i < iterations; i++) {
+                int r = std::rand() % 256;
+                if( r == 0 ) {
+                    l.orphanAll();
+                }
+                else if( r < 4 ) {
+                    l.push(new M(r));   // pushed payloads are always 1..3
+                }
+                else {
+                    M *orph = 0;
+                    for( M *m = l.head(); m; m=m->next() ) {
+                        // traversal must only ever see fully-constructed nodes
+                        ASSERT( m->_x > 0 && m->_x < 4 );
+                        if( r > 192 && std::rand() % 8 == 0 )
+                            orph = m;
+                    }
+                    if( orph ) {
+                        try {
+                            l.orphan(orph);
+                        }
+                        catch(...) { }   // another thread may have orphaned it first
+                    }
+                }
+            }
+        }
+    };
+
+    class List1Test {
+    public:
+        class M : public List1<M>::Base {
+            // private and never defined: nodes are intentionally not deleted
+            // (List1 leaks on purpose -- see the note on List1Test2)
+            ~M();
+        public:
+            M( int x ) {
+                num = x;
+            }
+            int num;
+        };
+
+        void run(){
+            List1<M> l;
+
+            vector<M*> ms;
+            for ( int i=0; i<5; i++ ) {
+                M * m = new M(i);
+                ms.push_back( m );
+                l.push( m );
+            }
+
+            // must assert as the item is missing
+            ASSERT_THROWS( l.orphan( new M( -3 ) ) , UserException );
+        }
+    };
+
+    /**
+     * Exercises hierarchical (collection-level) lock acquisition; the
+     * conflicting-acquisition cases are compiled only when CLC
+     * (collection-level concurrency) is enabled.
+     */
+    class Hierarchical1 {
+    public:
+        void run() {
+            {
+                LockCollectionForReading x("bar");
+            }
+            {
+                LockCollectionForReading x("foo");
+                LockCollectionForReading y("foo"); // recursion is ok
+            }
+            {
+                // presumably "foo.$bar" nests under "foo" so the pair must
+                // not conflict -- TODO(review): confirm against the lock impl
+                LockCollectionForReading x("foo");
+                LockCollectionForReading y("foo.$bar");
+            }
+#if defined(CLC)
+            {
+                LockCollectionForWriting x("foo");
+                LockCollectionForWriting y("foo");
+            }
+            {
+                LockCollectionForReading x("foo");
+                ASSERT_THROWS( LockCollectionForWriting y("foo"), DBException )
+            }
+            {
+                LockCollectionForReading x("foo");
+                ASSERT_THROWS( LockCollectionForReading y("bar"), DBException )
+            }
+#endif
+            cout << "temp ok" << endl;
+        }
+    };
+
+#if 1
+    /**
+     * Seven threads request the lock in the pattern given by `what` below
+     * (r/R shared, u/U upgradable, w exclusive); uppercase letters are
+     * expected to acquire quickly.  Staggered sleeps (100ms * thread index)
+     * order the requests deterministically.
+     */
+    class UpgradableTest : public ThreadedTest<7> {
+        RWLock m;
+    public:
+        UpgradableTest() : m("utest") {}
+    private:
+        virtual void validate() { }
+        virtual void subthread(int x) {
+            Client::initThread("utest");
+
+            /* r = get a read lock
+               R = get a read lock and we expect it to be fast
+               u = get upgradable
+               U = get upgradable and we expect it to be fast
+               w = get a write lock
+            */
+            // /-- verify upgrade can be done instantly while in a read lock already
+            // |  /-- verify upgrade acquisition isn't greedy
+            // |  |  /-- verify writes aren't greedy while in upgradable (or are they?)
+            // v  v  v
+            const char *what = " RURuRwR";
+
+            sleepmillis(100*x);   // thread x issues the x-th request in `what`
+
+            log() << x << ' ' << what[x] << " request" << endl;
+            char ch = what[x];
+            switch( ch ) {
+            case 'w':
+                {
+                    m.lock();
+                    log() << x << " w got" << endl;
+                    sleepmillis(100);
+                    log() << x << " w unlock" << endl;
+                    m.unlock();
+                }
+                break;
+            case 'u':
+            case 'U':
+                {
+                    Timer t;
+                    RWLock::Upgradable u(m);
+                    log() << x << ' ' << ch << " got" << endl;
+                    if( ch == 'U' ) {
+                        // 'U' acquisitions must be fast; the threshold is
+                        // looser on Windows SRW locks
+#ifdef MONGO_USE_SRW_ON_WINDOWS
+                        if( t.millis() > 200 ) {
+#else
+                        if( t.millis() > 20 ) {
+#endif
+                            DEV {
+                                // a _DEBUG buildbot might be slow, try to avoid false positives
+                                log() << "warning lock upgrade was slow " << t.millis() << endl;
+                            }
+                            else {
+                                log() << "assertion failure: lock upgrade was too slow: " << t.millis() << endl;
+                                ASSERT( false );
+                            }
+                        }
+                    }
+                    sleepsecs(1);
+                    log() << x << ' ' << ch << " unlock" << endl;
+                }
+                break;
+            case 'r':
+            case 'R':
+                {
+                    Timer t;
+                    m.lock_shared();
+                    log() << x << ' ' << ch << " got " << endl;
+                    if( what[x] == 'R' ) {
+                        // slow 'R' means writers were greedy while upgradable
+                        // was held; warn rather than fail (platform dependent)
+                        if( t.millis() > 15 ) {
+                            log() << x << " warning: when in upgradable, write locks are still greedy on this platform" << endl;
+                        }
+                    }
+                    sleepmillis(200);
+                    log() << x << ' ' << ch << " unlock" << endl;
+                    m.unlock_shared();
+                }
+                break;
+            default:
+                ASSERT(false);
+            }
+
+            cc().shutdown();
+        }
+    };
+#endif
+
+    /**
+     * Three threads: #1 takes the lock shared and holds it, #2 then requests
+     * it exclusively, #3 requests it shared last.  With a greedy write lock
+     * thread #3 must queue behind the waiting writer, so its acquisition is
+     * expected to take measurable time (> 50ms).
+     */
+    class WriteLocksAreGreedy : public ThreadedTest<3> {
+    public:
+        WriteLocksAreGreedy() : m("gtest") {}
+    private:
+        RWLock m;
+        virtual void validate() { }
+        virtual void subthread(int x) {
+            Client::initThread("utest");
+            if( x == 1 ) {
+                // reader arrives first and holds the lock for 300ms
+                cout << mongo::curTimeMillis64() % 10000 << " 1" << endl;
+                rwlock_shared lk(m);
+                sleepmillis(300);
+                cout << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
+            }
+            if( x == 2 ) {
+                // writer arrives second and blocks behind the reader
+                sleepmillis(100);
+                cout << mongo::curTimeMillis64() % 10000 << " 2" << endl;
+                rwlock lk(m, true);
+                //m._lock();
+                cout << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
+                //m.unlock();
+            }
+            if( x == 3 ) {
+                // late reader must wait for the queued writer (greediness)
+                sleepmillis(200);
+                Timer t;
+                cout << mongo::curTimeMillis64() % 10000 << " 3" << endl;
+                rwlock_shared lk(m);
+                cout << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
+                cout << t.millis() << endl;
+                ASSERT( t.millis() > 50 );
+            }
+            cc().shutdown();
+        }
+    };
+
+    /** Registers this file's tests as the "threading" suite.
+     *  RWLockTest2 is disabled pending SERVER-2996. */
+    class All : public Suite {
+    public:
+        All() : Suite( "threading" ) { }
+
+        void setupTests() {
+            add< Hierarchical1 >();
+
+            add< WriteLocksAreGreedy >();
+            add< UpgradableTest >();
+            add< List1Test >();
+            add< List1Test2 >();
+
+            add< IsAtomicUIntAtomic >();
+            add< MVarTest >();
+            add< ThreadPoolTest >();
+            add< LockTest >();
+
+
+            add< RWLockTest1 >();
+            //add< RWLockTest2 >(); // SERVER-2996
+            add< RWLockTest3 >();
+            add< RWLockTest4 >();
+
+            add< MongoMutexTest >();
+        }
+    } myall;
+}
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
new file mode 100644
index 00000000000..c912bf437d0
--- /dev/null
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -0,0 +1,877 @@
+// updatetests.cpp : unit tests relating to update requests
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/ops/query.h"
+
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+#include "../db/lasterror.h"
+#include "../db/ops/update.h"
+
+#include "dbtests.h"
+
+namespace UpdateTests {
+
+ // Shared fixture for the update tests: wraps a single process-wide
+ // DBDirectClient and installs a fresh LastError record for the thread so
+ // each test starts with a clean error state.
+ class ClientBase {
+ public:
+ // NOTE: Not bothering to backup the old error record.
+ ClientBase() {
+ mongo::lastError.reset( new LastError() );
+ }
+ ~ClientBase() {
+ mongo::lastError.release();
+ }
+ protected:
+ // Insert document o into collection ns via the shared direct client.
+ static void insert( const char *ns, BSONObj o ) {
+ client_.insert( ns, o );
+ }
+ // Apply update spec o to documents matching q in ns (upsert off by default).
+ static void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
+ client_.update( ns, Query( q ), o, upsert );
+ }
+ // True if the previous operation recorded an error ("err" field non-null).
+ static bool error() {
+ return !client_.getPrevError().getField( "err" ).isNull();
+ }
+ DBDirectClient &client() const { return client_; }
+ private:
+ static DBDirectClient client_;
+ };
+ DBDirectClient ClientBase::client_;
+
+ // Base for tests whose update is expected to be rejected: run() verifies the
+ // error flag is clear before doIt() and set after it.
+ class Fail : public ClientBase {
+ public:
+ virtual ~Fail() {}
+ void run() {
+ prep();
+ ASSERT( !error() );
+ doIt();
+ ASSERT( error() );
+ }
+ protected:
+ const char *ns() { return "unittests.UpdateTests_Fail"; }
+ // Seed one document so the update in doIt() has something to match.
+ virtual void prep() {
+ insert( ns(), fromjson( "{a:1}" ) );
+ }
+ // Perform the operation expected to produce an error.
+ virtual void doIt() = 0;
+ };
+
+ // $set on _id must be rejected.
+ class ModId : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$set:{'_id':4}}" ) );
+ }
+ };
+
+ // Mixing a $ modifier with a plain field in one update must fail.
+ class ModNonmodMix : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$set:{a:4},z:3}" ) );
+ }
+ };
+
+ // An unknown modifier name ($awk) must be rejected.
+ class InvalidMod : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$awk:{a:4}}" ) );
+ }
+ };
+
+ // A plain field appearing before a $ modifier must fail.
+ class ModNotFirst : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{z:3,$set:{a:4}}" ) );
+ }
+ };
+
+ // Two modifiers targeting the same field in one update must fail.
+ class ModDuplicateFieldSpec : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$set:{a:4},$inc:{a:1}}" ) );
+ }
+ };
+
+ // $inc with a non-numeric argument must fail.
+ class IncNonNumber : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$inc:{a:'d'}}" ) );
+ }
+ };
+
+ // $pushAll requires an array argument; a string must fail.
+ class PushAllNonArray : public Fail {
+ void doIt() {
+ insert( ns(), fromjson( "{a:[1]}" ) );
+ update( ns(), BSONObj(), fromjson( "{$pushAll:{a:'d'}}" ) );
+ }
+ };
+
+ // $pullAll requires an array argument; a string must fail.
+ class PullAllNonArray : public Fail {
+ void doIt() {
+ insert( ns(), fromjson( "{a:[1]}" ) );
+ update( ns(), BSONObj(), fromjson( "{$pullAll:{a:'d'}}" ) );
+ }
+ };
+
+ // $inc on an existing non-numeric field must fail.
+ class IncTargetNonNumber : public Fail {
+ void doIt() {
+ insert( ns(), BSON( "a" << "a" ) );
+ update( ns(), BSON( "a" << "a" ), fromjson( "{$inc:{a:1}}" ) );
+ }
+ };
+
+ // Base for $set-style tests: drops its collection when the test finishes.
+ class SetBase : public ClientBase {
+ public:
+ ~SetBase() {
+ client().dropCollection( ns() );
+ }
+ protected:
+ const char *ns() { return "unittests.updatetests.SetBase"; }
+ };
+
+ // $set replacing one number with another.
+ class SetNum : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << 1 ) );
+ client().update( ns(), BSON( "a" << 1 ), BSON( "$set" << BSON( "a" << 4 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << 4 ) ).isEmpty() );
+ }
+ };
+
+ // $set replacing a string with a same-length string.
+ class SetString : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << "b" ) );
+ client().update( ns(), BSON( "a" << "b" ), BSON( "$set" << BSON( "a" << "c" ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << "c" ) ).isEmpty() );
+ }
+ };
+
+ // $set replacing a string with a longer string (document must grow).
+ class SetStringDifferentLength : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << "b" ) );
+ client().update( ns(), BSON( "a" << "b" ), BSON( "$set" << BSON( "a" << "cd" ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << "cd" ) ).isEmpty() );
+ }
+ };
+
+ // $set changing a field's type from string to number.
+ class SetStringToNum : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << "b" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 5 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << 5 ) ).isEmpty() );
+ }
+ };
+
+ // String -> double $set; sizes chosen so the rewrite can happen in place.
+ class SetStringToNumInPlace : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << "bcd" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 5.0 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << 5.0 ) ).isEmpty() );
+ }
+ };
+
+ // $inc and $set addressed through a dotted path into a subdocument.
+ class ModDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{a:{b:4}}" ) );
+ client().update( ns(), Query(), BSON( "$inc" << BSON( "a.b" << 10 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a.b" << 14 ) ).isEmpty() );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << 55 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a.b" << 55 ) ).isEmpty() );
+ }
+ };
+
+ // Dotted $set with an equal-length value (in-place update path).
+ class SetInPlaceDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{a:{b:'cdef'}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << "llll" ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a.b" << "llll" ) ).isEmpty() );
+ }
+ };
+
+ // Dotted $set with a longer value forces the document to be rebuilt.
+ class SetRecreateDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:'cdef'}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << "lllll" ) ) );
+ ASSERT( client().findOne( ns(), BSON( "a.b" << "lllll" ) ).woCompare( fromjson( "{'_id':0,a:{b:'lllll'}}" ) ) == 0 );
+ }
+ };
+
+ // Dotted $set creates the whole missing subdocument path.
+ class SetMissingDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), BSONObj(), BSON( "$set" << BSON( "a.b" << "lllll" ) ) );
+ ASSERT( client().findOne( ns(), BSON( "a.b" << "lllll" ) ).woCompare( fromjson( "{'_id':0,a:{b:'lllll'}}" ) ) == 0 );
+ }
+ };
+
+ // Dotted $set adds a sibling field next to an existing one in the subdoc.
+ class SetAdjacentDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{c:4}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << "lllll" ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), BSON( "a.b" << "lllll" ) ) , fromjson( "{'_id':0,a:{b:'lllll',c:4}}" ) );
+ }
+ };
+
+ // $inc on a missing field creates it with the increment value.
+ class IncMissing : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$inc" << BSON( "f" << 3.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,f:3}" ) ) == 0 );
+ }
+ };
+
+ // $inc with single- vs multi-document updates: only the multi:true form
+ // touches every matching document.
+ class MultiInc : public SetBase {
+ public:
+
+ // Renders all x values, ordered by _id, as a comma-separated string.
+ string s() {
+ stringstream ss;
+ auto_ptr<DBClientCursor> cc = client().query( ns() , Query().sort( BSON( "_id" << 1 ) ) );
+ bool first = true;
+ while ( cc->more() ) {
+ if ( first ) first = false;
+ else ss << ",";
+
+ BSONObj o = cc->next();
+ ss << o["x"].numberInt();
+ }
+ return ss.str();
+ }
+
+ void run() {
+ client().insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
+ client().insert( ns(), BSON( "_id" << 2 << "x" << 5 ) );
+
+ ASSERT_EQUALS( "1,5" , s() );
+
+ client().update( ns() , BSON( "_id" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
+ ASSERT_EQUALS( "2,5" , s() );
+
+ // No multi flag: only the first matching document is incremented.
+ client().update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) );
+ ASSERT_EQUALS( "3,5" , s() );
+
+ // multi:true (last arg): both documents are incremented.
+ client().update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , false , true );
+ ASSERT_EQUALS( "4,6" , s() );
+
+ }
+ };
+
+ // Two dotted $set fields given out of order end up stored in sorted order
+ // within the newly created subdocument.
+ class UnorderedNewSet : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "f.g.h" << 3.0 << "f.g.a" << 2.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,f:{g:{a:2,h:3}}}" ) ) == 0 );
+ }
+ };
+
+ // Same as above but with deeper adjacent paths.
+ class UnorderedNewSetAdjacent : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), BSONObj(), BSON( "$set" << BSON( "f.g.h.b" << 3.0 << "f.g.a.b" << 2.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,f:{g:{a:{b:2},h:{b:3}}}}" ) ) == 0 );
+ }
+ };
+
+ // $set on a numeric array position ("z.0") replaces that element.
+ class ArrayEmbeddedSet : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,z:[4,'b']}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "z.0" << "a" ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), Query() ) , fromjson( "{'_id':0,z:['a','b']}" ) );
+ }
+ };
+
+ // Dotted $set into a field that holds a scalar is a no-op on the document.
+ class AttemptEmbedInExistingNum : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:1}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << 1 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:1}" ) ) == 0 );
+ }
+ };
+
+ // Setting both "a" and "a.b" in one update conflicts; document is unchanged.
+ class AttemptEmbedConflictsWithOtherSet : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 2 << "a.b" << 1 ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), Query() ) , fromjson( "{'_id':0}" ) );
+ }
+ };
+
+ // Conflicting "a" / "a.b" mods on an existing subdocument leave it unchanged.
+ class ModMasksEmbeddedConflict : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:2}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 2 << "a.b" << 1 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:2}}" ) ) == 0 );
+ }
+ };
+
+ // $set of a whole subdocument replaces the existing one entirely.
+ class ModOverwritesExistingObject : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:2}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << BSON( "c" << 2 ) ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{c:2}}" ) ) == 0 );
+ }
+ };
+
+ // A dotted path ending in '.' ("a.") is invalid and must error.
+ class InvalidEmbeddedSet : public Fail {
+ public:
+ virtual void doIt() {
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a." << 1 ) ) );
+ }
+ };
+
+ // Upsert with a dotted $set creates the document with the nested path.
+ class UpsertMissingEmbedded : public SetBase {
+ public:
+ void run() {
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << 1 ) ), true );
+ ASSERT( !client().findOne( ns(), QUERY( "a.b" << 1 ) ).isEmpty() );
+ }
+ };
+
+ // $push appends to an existing array.
+ class Push : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[1,5]}" ) );
+ }
+ };
+
+ // $push onto a non-array field leaves the document unchanged.
+ class PushInvalidEltType : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:1}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:1}" ) ) == 0 );
+ }
+ };
+
+ // $set and $push on the same field conflict; document is unchanged.
+ class PushConflictsWithOtherMod : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 1 ) <<"$push" << BSON( "a" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:[1]}" ) ) == 0 );
+ }
+ };
+
+ // $push on a missing field creates a one-element array.
+ class PushFromNothing : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[5]}" ) );
+ }
+ };
+
+ // $push onto an empty array appends normally.
+ class PushFromEmpty : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:[]}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:[5]}" ) ) == 0 );
+ }
+ };
+
+ // Dotted $push creates the enclosing subdocument and the array.
+ class PushInsideNothing : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a.b" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:[5]}}" ) ) == 0 );
+ }
+ };
+
+ // $push into a field another mod in the same update creates must fail;
+ // document is unchanged.
+ class CantPushInsideOtherMod : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << BSONObj() ) << "$push" << BSON( "a.b" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0}" ) ) == 0 );
+ }
+ };
+
+ // Two $push mods on the same field in one update are rejected.
+ class CantPushTwice : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:[]}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 4 ) << "$push" << BSON( "a" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:[]}" ) ) == 0 );
+ }
+ };
+
+ // Dotted $set through an existing scalar ("a.b.c" where a.b is 4) is a no-op.
+ class SetEncapsulationConflictsWithExistingType : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:4}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b.c" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:4}}" ) ) == 0 );
+ }
+ };
+
+ // $push onto a subdocument (non-array) leaves the document unchanged.
+ class CantPushToParent : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:4}}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:4}}" ) ) == 0 );
+ }
+ };
+
+ // $inc on a subdocument (non-numeric) leaves the document unchanged.
+ class CantIncParent : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:4}}" ) );
+ client().update( ns(), Query(), BSON( "$inc" << BSON( "a" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:4}}" ) ) == 0 );
+ }
+ };
+
+ // Adding a sibling via dotted $set must not drop an existing empty subdoc.
+ class DontDropEmpty : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:{}}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.c" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:{},c:4}}" ) ) == 0 );
+ }
+ };
+
+ // Dotted $set can insert into a previously empty subdocument.
+ class InsertInEmpty : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:{}}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b.f" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:{f:4}}}" ) ) == 0 );
+ }
+ };
+
+ // Index on the parent ("a") stays consistent when a child ("a.b") is $set.
+ class IndexParentOfMod : public SetBase {
+ public:
+ void run() {
+ client().ensureIndex( ns(), BSON( "a" << 1 ) );
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), fromjson( "{$set:{'a.b':4}}" ) );
+ ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , client().findOne( ns(), Query() ) );
+ ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , client().findOne( ns(), fromjson( "{'a.b':4}" ) ) ); // make sure the index works
+ }
+ };
+
+ // Index directly on the modified path ("a.b") is updated by the $set.
+ class IndexModSet : public SetBase {
+ public:
+ void run() {
+ client().ensureIndex( ns(), BSON( "a.b" << 1 ) );
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:3}}" ) );
+ client().update( ns(), Query(), fromjson( "{$set:{'a.b':4}}" ) );
+ ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , client().findOne( ns(), Query() ) );
+ ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , client().findOne( ns(), fromjson( "{'a.b':4}" ) ) ); // make sure the index works
+ }
+ };
+
+
+ // A full-document replacement must preserve the original _id (lookup via
+ // the _id index, forced with a hint).
+ class PreserveIdWithIndex : public SetBase { // Not using $set, but base class is still useful
+ public:
+ void run() {
+ client().insert( ns(), BSON( "_id" << 55 << "i" << 5 ) );
+ client().update( ns(), BSON( "i" << 5 ), BSON( "i" << 6 ) );
+ ASSERT( !client().findOne( ns(), Query( BSON( "_id" << 55 ) ).hint
+ ( "{\"_id\":ObjectId(\"000000000000000000000000\")}" ) ).isEmpty() );
+ }
+ };
+
+ // Upsert mixing a plain field with a $ modifier must record an error.
+ class CheckNoMods : public SetBase {
+ public:
+ void run() {
+ client().update( ns(), BSONObj(), BSON( "i" << 5 << "$set" << BSON( "q" << 3 ) ), true );
+ ASSERT( error() );
+ }
+ };
+
+ // $set of an absent field to null stores an explicit null value.
+ class UpdateMissingToNull : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << 5 ) );
+ client().update( ns(), BSON( "a" << 5 ), fromjson( "{$set:{b:null}}" ) );
+ ASSERT_EQUALS( jstNULL, client().findOne( ns(), QUERY( "a" << 5 ) ).getField( "b" ).type() );
+ }
+ };
+
+ // Unit tests for the ModSet class itself (mod parsing / application),
+ // exercised directly rather than through the client update path.
+ namespace ModSetTests {
+
+ // haveModForField matches exact dotted paths only; haveConflictingMod
+ // also matches any prefix of a modded path.
+ class internal1 {
+ public:
+ void run() {
+ BSONObj b = BSON( "$inc" << BSON( "x" << 1 << "a.b" << 1 ) );
+ ModSet m(b);
+
+ ASSERT( m.haveModForField( "x" ) );
+ ASSERT( m.haveModForField( "a.b" ) );
+ ASSERT( ! m.haveModForField( "y" ) );
+ ASSERT( ! m.haveModForField( "a.c" ) );
+ ASSERT( ! m.haveModForField( "a" ) );
+
+ ASSERT( m.haveConflictingMod( "x" ) );
+ ASSERT( m.haveConflictingMod( "a" ) );
+ ASSERT( m.haveConflictingMod( "a.b" ) );
+ ASSERT( ! m.haveConflictingMod( "a.bc" ) );
+ ASSERT( ! m.haveConflictingMod( "a.c" ) );
+ ASSERT( ! m.haveConflictingMod( "a.a" ) );
+ }
+ };
+
+ // Helper base: applies mod spec morig to document in via
+ // ModSet::prepare + createNewFromMods and compares against wanted.
+ class Base {
+ public:
+
+ virtual ~Base() {}
+
+
+ void test( BSONObj morig , BSONObj in , BSONObj wanted ) {
+ BSONObj m = morig.copy();
+ ModSet set(m);
+
+ BSONObj out = set.prepare(in)->createNewFromMods();
+ ASSERT_EQUALS( wanted , out );
+ }
+ };
+
+ // $inc on present, absent, and later-sorting fields.
+ class inc1 : public Base {
+ public:
+ void run() {
+ BSONObj m = BSON( "$inc" << BSON( "x" << 1 ) );
+ test( m , BSON( "x" << 5 ) , BSON( "x" << 6 ) );
+ test( m , BSON( "a" << 5 ) , BSON( "a" << 5 << "x" << 1 ) );
+ test( m , BSON( "z" << 5 ) , BSON( "x" << 1 << "z" << 5 ) );
+ }
+ };
+
+ // Dotted $inc creating and incrementing nested fields.
+ class inc2 : public Base {
+ public:
+ void run() {
+ BSONObj m = BSON( "$inc" << BSON( "a.b" << 1 ) );
+ test( m , BSONObj() , BSON( "a" << BSON( "b" << 1 ) ) );
+ test( m , BSON( "a" << BSON( "b" << 2 ) ) , BSON( "a" << BSON( "b" << 3 ) ) );
+
+ m = BSON( "$inc" << BSON( "a.b" << 1 << "a.c" << 1 ) );
+ test( m , BSONObj() , BSON( "a" << BSON( "b" << 1 << "c" << 1 ) ) );
+
+
+ }
+ };
+
+ // $set creating, overwriting, and building a nested field.
+ class set1 : public Base {
+ public:
+ void run() {
+ test( BSON( "$set" << BSON( "x" << 17 ) ) , BSONObj() , BSON( "x" << 17 ) );
+ test( BSON( "$set" << BSON( "x" << 17 ) ) , BSON( "x" << 5 ) , BSON( "x" << 17 ) );
+
+ test( BSON( "$set" << BSON( "x.a" << 17 ) ) , BSON( "z" << 5 ) , BSON( "x" << BSON( "a" << 17 )<< "z" << 5 ) );
+ }
+ };
+
+ // $push appending to an existing array via ModSet directly.
+ class push1 : public Base {
+ public:
+ void run() {
+ test( BSON( "$push" << BSON( "a" << 5 ) ) , fromjson( "{a:[1]}" ) , fromjson( "{a:[1,5]}" ) );
+ }
+ };
+
+ };
+
+ // End-to-end update tests driven through the client: each test supplies an
+ // initial document, a modifier, and the expected result.
+ namespace basic {
+ // Fixture: drops the test collection before and after dotest(), and
+ // provides insert/update/findOne helpers plus the initial/mod/after check.
+ class Base : public ClientBase {
+ protected:
+
+ virtual const char * ns() = 0;
+ virtual void dotest() = 0;
+
+ void insert( const BSONObj& o ) {
+ client().insert( ns() , o );
+ }
+
+ void update( const BSONObj& m ) {
+ client().update( ns() , BSONObj() , m );
+ }
+
+ BSONObj findOne() {
+ return client().findOne( ns() , BSONObj() );
+ }
+
+ void test( const char* initial , const char* mod , const char* after ) {
+ test( fromjson( initial ) , fromjson( mod ) , fromjson( after ) );
+ }
+
+
+ // Insert initial, apply mod, and assert the stored document equals after.
+ void test( const BSONObj& initial , const BSONObj& mod , const BSONObj& after ) {
+ client().dropCollection( ns() );
+ insert( initial );
+ update( mod );
+ ASSERT_EQUALS( after , findOne() );
+ client().dropCollection( ns() );
+ }
+
+ public:
+
+ Base() {}
+ virtual ~Base() {
+ }
+
+ void run() {
+ client().dropCollection( ns() );
+
+ dotest();
+
+ client().dropCollection( ns() );
+ }
+ };
+
+ // Convenience base for tests with exactly one initial/mod/after triple.
+ class SingleTest : public Base {
+ virtual BSONObj initial() = 0;
+ virtual BSONObj mod() = 0;
+ virtual BSONObj after() = 0;
+
+ void dotest() {
+ test( initial() , mod() , after() );
+ }
+
+ };
+
+ // int + int $inc.
+ class inc1 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 1 );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2 ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 3 );
+ }
+ virtual const char * ns() {
+ return "unittests.inc1";
+ }
+
+ };
+
+ // int + double $inc yields a double.
+ class inc2 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 1 );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2.5 ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 3.5 );
+ }
+ virtual const char * ns() {
+ return "unittests.inc2";
+ }
+
+ };
+
+ // long + int $inc.
+ class inc3 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 537142123123LL );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2 ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 537142123125LL );
+ }
+ virtual const char * ns() {
+ return "unittests.inc3";
+ }
+
+ };
+
+ // long + long $inc.
+ class inc4 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 537142123123LL );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2LL ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 537142123125LL );
+ }
+ virtual const char * ns() {
+ return "unittests.inc4";
+ }
+
+ };
+
+ // long + double $inc still compares equal to the long result.
+ class inc5 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 537142123123LL );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2.0 ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 537142123125LL );
+ }
+ virtual const char * ns() {
+ return "unittests.inc5";
+ }
+
+ };
+
+ // $inc an int field across INT_MAX: value must stay correct and the field
+ // must be promoted to NumberLong rather than overflowing (SERVER-2005).
+ class inc6 : public Base {
+
+ virtual const char * ns() {
+ return "unittests.inc6";
+ }
+
+
+ virtual BSONObj initial() { return BSONObj(); }
+ virtual BSONObj mod() { return BSONObj(); }
+ virtual BSONObj after() { return BSONObj(); }
+
+ void dotest() {
+ long long start = numeric_limits<int>::max() - 5;
+ long long max = numeric_limits<int>::max() + 5ll;
+
+ client().insert( ns() , BSON( "x" << (int)start ) );
+ ASSERT( findOne()["x"].type() == NumberInt );
+
+ while ( start < max ) {
+ update( BSON( "$inc" << BSON( "x" << 1 ) ) );
+ start += 1;
+ ASSERT_EQUALS( start , findOne()["x"].numberLong() ); // SERVER-2005
+ }
+
+ ASSERT( findOne()["x"].type() == NumberLong );
+ }
+ };
+
+ // $bit and/or combinations, including both orders of and+or.
+ class bit1 : public Base {
+ const char * ns() {
+ return "unittests.bit1";
+ }
+ void dotest() {
+ test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "and" << 2 ) ) ) , BSON( "_id" << 1 << "x" << ( 3 & 2 ) ) );
+ test( BSON( "_id" << 1 << "x" << 1 ) , BSON( "$bit" << BSON( "x" << BSON( "or" << 4 ) ) ) , BSON( "_id" << 1 << "x" << ( 1 | 4 ) ) );
+ test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "and" << 2 << "or" << 8 ) ) ) , BSON( "_id" << 1 << "x" << ( ( 3 & 2 ) | 8 ) ) );
+ test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "or" << 2 << "and" << 8 ) ) ) , BSON( "_id" << 1 << "x" << ( ( 3 | 2 ) & 8 ) ) );
+
+ }
+ };
+
+ // $unset removes the field entirely.
+ class unset : public Base {
+ const char * ns() {
+ return "unittests.unset";
+ }
+ void dotest() {
+ test( "{_id:1,x:1}" , "{$unset:{x:1}}" , "{_id:1}" );
+ }
+ };
+
+ // $set switching a field between int and double representations.
+ class setswitchint : public Base {
+ const char * ns() {
+ return "unittests.int1";
+ }
+ void dotest() {
+ test( BSON( "_id" << 1 << "x" << 1 ) , BSON( "$set" << BSON( "x" << 5.6 ) ) , BSON( "_id" << 1 << "x" << 5.6 ) );
+ test( BSON( "_id" << 1 << "x" << 5.6 ) , BSON( "$set" << BSON( "x" << 1 ) ) , BSON( "_id" << 1 << "x" << 1 ) );
+ }
+ };
+
+
+ };
+
+ // Registers every test case in the "update" suite.
+ class All : public Suite {
+ public:
+ All() : Suite( "update" ) {
+ }
+ void setupTests() {
+ add< ModId >();
+ add< ModNonmodMix >();
+ add< InvalidMod >();
+ add< ModNotFirst >();
+ add< ModDuplicateFieldSpec >();
+ add< IncNonNumber >();
+ add< PushAllNonArray >();
+ add< PullAllNonArray >();
+ add< IncTargetNonNumber >();
+ add< SetNum >();
+ add< SetString >();
+ add< SetStringDifferentLength >();
+ add< SetStringToNum >();
+ add< SetStringToNumInPlace >();
+ add< ModDotted >();
+ add< SetInPlaceDotted >();
+ add< SetRecreateDotted >();
+ add< SetMissingDotted >();
+ add< SetAdjacentDotted >();
+ add< IncMissing >();
+ add< MultiInc >();
+ add< UnorderedNewSet >();
+ add< UnorderedNewSetAdjacent >();
+ add< ArrayEmbeddedSet >();
+ add< AttemptEmbedInExistingNum >();
+ add< AttemptEmbedConflictsWithOtherSet >();
+ add< ModMasksEmbeddedConflict >();
+ add< ModOverwritesExistingObject >();
+ add< InvalidEmbeddedSet >();
+ add< UpsertMissingEmbedded >();
+ add< Push >();
+ add< PushInvalidEltType >();
+ add< PushConflictsWithOtherMod >();
+ add< PushFromNothing >();
+ add< PushFromEmpty >();
+ add< PushInsideNothing >();
+ add< CantPushInsideOtherMod >();
+ add< CantPushTwice >();
+ add< SetEncapsulationConflictsWithExistingType >();
+ add< CantPushToParent >();
+ add< CantIncParent >();
+ add< DontDropEmpty >();
+ add< InsertInEmpty >();
+ add< IndexParentOfMod >();
+ add< IndexModSet >();
+ add< PreserveIdWithIndex >();
+ add< CheckNoMods >();
+ add< UpdateMissingToNull >();
+
+ add< ModSetTests::internal1 >();
+ add< ModSetTests::inc1 >();
+ add< ModSetTests::inc2 >();
+ add< ModSetTests::set1 >();
+ add< ModSetTests::push1 >();
+
+ add< basic::inc1 >();
+ add< basic::inc2 >();
+ add< basic::inc3 >();
+ add< basic::inc4 >();
+ add< basic::inc5 >();
+ add< basic::inc6 >();
+ add< basic::bit1 >();
+ add< basic::unset >();
+ add< basic::setswitchint >();
+ }
+ } myall;
+
+} // namespace UpdateTests
+