Diffstat (limited to 'src/mongo/dbtests')
-rw-r--r--  src/mongo/dbtests/accumulatortests.cpp  1937
-rw-r--r--  src/mongo/dbtests/basictests.cpp  905
-rw-r--r--  src/mongo/dbtests/chunktests.cpp  547
-rw-r--r--  src/mongo/dbtests/clienttests.cpp  335
-rw-r--r--  src/mongo/dbtests/commandtests.cpp  405
-rw-r--r--  src/mongo/dbtests/config_server_fixture.cpp  158
-rw-r--r--  src/mongo/dbtests/config_server_fixture.h  172
-rw-r--r--  src/mongo/dbtests/config_upgrade_tests.cpp  353
-rw-r--r--  src/mongo/dbtests/counttests.cpp  252
-rw-r--r--  src/mongo/dbtests/dbclient_multi_command_test.cpp  12
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp  348
-rw-r--r--  src/mongo/dbtests/dbtests.cpp  72
-rw-r--r--  src/mongo/dbtests/dbtests.h  34
-rw-r--r--  src/mongo/dbtests/directclienttests.cpp  314
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp  3784
-rw-r--r--  src/mongo/dbtests/documenttests.cpp  3058
-rw-r--r--  src/mongo/dbtests/executor_registry.cpp  492
-rw-r--r--  src/mongo/dbtests/expressiontests.cpp  7864
-rw-r--r--  src/mongo/dbtests/framework.cpp  146
-rw-r--r--  src/mongo/dbtests/framework.h  6
-rw-r--r--  src/mongo/dbtests/framework_options.cpp  288
-rw-r--r--  src/mongo/dbtests/framework_options.h  48
-rw-r--r--  src/mongo/dbtests/framework_options_init.cpp  41
-rw-r--r--  src/mongo/dbtests/gle_test.cpp  151
-rw-r--r--  src/mongo/dbtests/gridfstest.cpp  46
-rw-r--r--  src/mongo/dbtests/indexcatalogtests.cpp  271
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp  1096
-rw-r--r--  src/mongo/dbtests/jsobjtests.cpp  4094
-rw-r--r--  src/mongo/dbtests/jsontests.cpp  5775
-rw-r--r--  src/mongo/dbtests/jstests.cpp  4041
-rw-r--r--  src/mongo/dbtests/matchertests.cpp  465
-rw-r--r--  src/mongo/dbtests/merge_chunk_tests.cpp  599
-rw-r--r--  src/mongo/dbtests/mmaptests.cpp  225
-rw-r--r--  src/mongo/dbtests/mock/mock_conn_registry.cpp  110
-rw-r--r--  src/mongo/dbtests/mock/mock_conn_registry.h  136
-rw-r--r--  src/mongo/dbtests/mock/mock_dbclient_connection.cpp  267
-rw-r--r--  src/mongo/dbtests/mock/mock_dbclient_connection.h  189
-rw-r--r--  src/mongo/dbtests/mock/mock_dbclient_cursor.cpp  24
-rw-r--r--  src/mongo/dbtests/mock/mock_dbclient_cursor.h  37
-rw-r--r--  src/mongo/dbtests/mock/mock_remote_db_server.cpp  301
-rw-r--r--  src/mongo/dbtests/mock/mock_remote_db_server.h  343
-rw-r--r--  src/mongo/dbtests/mock/mock_replica_set.cpp  417
-rw-r--r--  src/mongo/dbtests/mock/mock_replica_set.h  195
-rw-r--r--  src/mongo/dbtests/mock_dbclient_conn_test.cpp  875
-rw-r--r--  src/mongo/dbtests/mock_replica_set_test.cpp  580
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp  435
-rw-r--r--  src/mongo/dbtests/oplogstarttests.cpp  674
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp  251
-rw-r--r--  src/mongo/dbtests/perftests.cpp  2823
-rw-r--r--  src/mongo/dbtests/pipelinetests.cpp  844
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp  1447
-rw-r--r--  src/mongo/dbtests/query_multi_plan_runner.cpp  451
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp  846
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp  2525
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp  404
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp  649
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp  574
-rw-r--r--  src/mongo/dbtests/query_stage_count_scan.cpp  1157
-rw-r--r--  src/mongo/dbtests/query_stage_delete.cpp  255
-rw-r--r--  src/mongo/dbtests/query_stage_distinct.cpp  357
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp  324
-rw-r--r--  src/mongo/dbtests/query_stage_ixscan.cpp  544
-rw-r--r--  src/mongo/dbtests/query_stage_keep.cpp  342
-rw-r--r--  src/mongo/dbtests/query_stage_limit_skip.cpp  112
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp  1089
-rw-r--r--  src/mongo/dbtests/query_stage_near.cpp  366
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp  668
-rw-r--r--  src/mongo/dbtests/query_stage_subplan.cpp  213
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp  364
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp  554
-rw-r--r--  src/mongo/dbtests/querytests.cpp  3103
-rw-r--r--  src/mongo/dbtests/replica_set_monitor_test.cpp  2424
-rw-r--r--  src/mongo/dbtests/repltests.cpp  2781
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp  1245
-rw-r--r--  src/mongo/dbtests/sharding.cpp  1026
-rw-r--r--  src/mongo/dbtests/socktests.cpp  49
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp  1311
-rw-r--r--  src/mongo/dbtests/updatetests.cpp  3899
78 files changed, 38813 insertions, 37101 deletions
diff --git a/src/mongo/dbtests/accumulatortests.cpp b/src/mongo/dbtests/accumulatortests.cpp
index f57bbfcd787..031c7e8e180 100644
--- a/src/mongo/dbtests/accumulatortests.cpp
+++ b/src/mongo/dbtests/accumulatortests.cpp
@@ -37,889 +37,1054 @@
namespace AccumulatorTests {
- using boost::intrusive_ptr;
- using std::numeric_limits;
- using std::string;
-
- class Base {
- protected:
- BSONObj fromDocument( const Document& document ) {
- return document.toBson();
- }
- BSONObj fromValue( const Value& value ) {
- BSONObjBuilder bob;
- value.addToBsonObj( &bob, "" );
- return bob.obj();
- }
- /** Check binary equality, ensuring use of the same numeric types. */
- void assertBinaryEqual( const BSONObj& expected, const BSONObj& actual ) const {
- ASSERT_EQUALS( expected, actual );
- ASSERT( expected.binaryEqual( actual ) );
- }
- private:
- intrusive_ptr<ExpressionContext> _shard;
- intrusive_ptr<ExpressionContext> _router;
- };
-
- namespace Avg {
-
- class Base : public AccumulatorTests::Base {
- public:
- virtual ~Base() {
- }
- protected:
- void createAccumulator() {
- _accumulator = AccumulatorAvg::create();
- ASSERT_EQUALS(string("$avg"), _accumulator->getOpName());
- }
- Accumulator *accumulator() { return _accumulator.get(); }
- private:
- intrusive_ptr<Accumulator> _accumulator;
- };
-
- /** No documents evaluated. */
- class None : public Base {
- public:
- void run() {
- createAccumulator();
- ASSERT_EQUALS( 0, accumulator()->getValue(false).getDouble() );
- }
- };
-
- /** One int value is converted to double. */
- class OneInt : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(3), false);
- ASSERT_EQUALS( 3, accumulator()->getValue(false).getDouble() );
- }
- };
-
- /** One long value is converted to double. */
- class OneLong : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(-4LL), false);
- ASSERT_EQUALS( -4, accumulator()->getValue(false).getDouble() );
- }
- };
-
- /** One double value. */
- class OneDouble : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(22.6), false);
- ASSERT_EQUALS( 22.6, accumulator()->getValue(false).getDouble() );
- }
- };
-
-        /** The average of two ints is a double, even if inexact. */
- class IntInt : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(10), false);
- accumulator()->process(Value(11), false);
- ASSERT_EQUALS( 10.5, accumulator()->getValue(false).getDouble() );
- }
- };
-
- /** The average of an int and a double is calculated as a double. */
- class IntDouble : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(10), false);
- accumulator()->process(Value(11.0), false);
- ASSERT_EQUALS( 10.5, accumulator()->getValue(false).getDouble() );
- }
- };
-
- /** Unlike $sum, two ints do not overflow in the 'total' portion of the average. */
- class IntIntNoOverflow : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(numeric_limits<int>::max()), false);
- accumulator()->process(Value(numeric_limits<int>::max()), false);
- ASSERT_EQUALS(numeric_limits<int>::max(),
- accumulator()->getValue(false).getDouble());
- }
- };
-
- /** Two longs do overflow in the 'total' portion of the average. */
- class LongLongOverflow : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(numeric_limits<long long>::max()), false);
- accumulator()->process(Value(numeric_limits<long long>::max()), false);
- ASSERT_EQUALS( ( (double)numeric_limits<long long>::max() +
- numeric_limits<long long>::max() ) / 2.0,
- accumulator()->getValue(false).getDouble() );
- }
- };
-
- namespace Shard {
- class SingleOperandBase : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(operand(), false);
- assertBinaryEqual( expectedResult(),
- fromDocument(accumulator()->getValue(true).getDocument()));
- }
- protected:
- virtual Value operand() = 0;
- virtual BSONObj expectedResult() = 0;
- };
-
- /** Shard result for one integer. */
- class Int : public SingleOperandBase {
- Value operand() { return Value(3); }
- BSONObj expectedResult() { return BSON( "subTotal" << 3.0 << "count" << 1LL ); }
- };
-
- /** Shard result for one long. */
- class Long : public SingleOperandBase {
- Value operand() { return Value(5LL); }
- BSONObj expectedResult() { return BSON( "subTotal" << 5.0 << "count" << 1LL ); }
- };
-
- /** Shard result for one double. */
- class Double : public SingleOperandBase {
- Value operand() { return Value(116.0); }
- BSONObj expectedResult() { return BSON( "subTotal" << 116.0 << "count" << 1LL ); }
- };
-
- class TwoOperandBase : public Base {
- public:
- void run() {
- checkAvg( operand1(), operand2() );
- checkAvg( operand2(), operand1() );
- }
- protected:
- virtual Value operand1() = 0;
- virtual Value operand2() = 0;
- virtual BSONObj expectedResult() = 0;
- private:
- void checkAvg( const Value& a, const Value& b ) {
- createAccumulator();
- accumulator()->process(a, false);
- accumulator()->process(b, false);
- assertBinaryEqual(expectedResult(),
- fromDocument(accumulator()->getValue(true).getDocument()));
- }
- };
-
- /** Shard two ints overflow. */
- class IntIntOverflow : public TwoOperandBase {
- Value operand1() { return Value(numeric_limits<int>::max()); }
- Value operand2() { return Value(3); }
- BSONObj expectedResult() {
- return BSON( "subTotal" << numeric_limits<int>::max() + 3.0 << "count" << 2LL );
- }
- };
-
- /** Shard avg an int and a long. */
- class IntLong : public TwoOperandBase {
- Value operand1() { return Value(5); }
- Value operand2() { return Value(3LL); }
- BSONObj expectedResult() { return BSON( "subTotal" << 8.0 << "count" << 2LL ); }
- };
-
- /** Shard avg an int and a double. */
- class IntDouble : public TwoOperandBase {
- Value operand1() { return Value(5); }
- Value operand2() { return Value(6.2); }
- BSONObj expectedResult() { return BSON( "subTotal" << 11.2 << "count" << 2LL ); }
- };
-
- /** Shard avg a long and a double. */
- class LongDouble : public TwoOperandBase {
- Value operand1() { return Value(5LL); }
- Value operand2() { return Value(1.0); }
- BSONObj expectedResult() { return BSON( "subTotal" << 6.0 << "count" << 2LL ); }
- };
-
- /** Shard avg an int, long, and double. */
- class IntLongDouble : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(1), false);
- accumulator()->process(Value(2LL), false);
- accumulator()->process(Value(4.0), false);
- assertBinaryEqual(BSON( "subTotal" << 7.0 << "count" << 3LL ),
- fromDocument(accumulator()->getValue(true).getDocument()));
- }
- };
-
- } // namespace Shard
-
- namespace Router {
- /** Router result from one shard. */
- class OneShard : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(DOC("subTotal" << 3.0 << "count" << 2LL)), true);
- assertBinaryEqual( BSON( "" << 3.0 / 2 ),
- fromValue( accumulator()->getValue(false) ) );
- }
- };
-
- /** Router result from two shards. */
- class TwoShards : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(DOC("subTotal" << 6.0 << "count" << 1LL)), true);
- accumulator()->process(Value(DOC("subTotal" << 5.0 << "count" << 2LL)), true);
- assertBinaryEqual( BSON( "" << 11.0 / 3 ),
- fromValue( accumulator()->getValue(false) ) );
- }
- };
-
- } // namespace Router
-
- } // namespace Avg
-
- namespace First {
-
- class Base : public AccumulatorTests::Base {
- protected:
- void createAccumulator() {
- _accumulator = AccumulatorFirst::create();
- ASSERT_EQUALS(string("$first"), _accumulator->getOpName());
- }
- Accumulator *accumulator() { return _accumulator.get(); }
- private:
- intrusive_ptr<Accumulator> _accumulator;
- };
-
- /** The accumulator evaluates no documents. */
- class None : public Base {
- public:
- void run() {
- createAccumulator();
- // The accumulator returns no value in this case.
- ASSERT( accumulator()->getValue(false).missing() );
- }
- };
-
- /* The accumulator evaluates one document and retains its value. */
- class One : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- ASSERT_EQUALS( 5, accumulator()->getValue(false).getInt() );
- }
- };
-
-        /* The accumulator evaluates one document with the field missing and returns a missing value. */
- class Missing : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(), false);
- ASSERT_EQUALS( EOO, accumulator()->getValue(false).getType() );
- }
- };
-
- /* The accumulator evaluates two documents and retains the value in the first. */
- class Two : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- accumulator()->process(Value(7), false);
- ASSERT_EQUALS( 5, accumulator()->getValue(false).getInt() );
- }
- };
-
- /* The accumulator evaluates two documents and retains the missing value in the first. */
- class FirstMissing : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(), false);
- accumulator()->process(Value(7), false);
- ASSERT_EQUALS( EOO, accumulator()->getValue(false).getType() );
- }
- };
-
- } // namespace First
-
- namespace Last {
-
- class Base : public AccumulatorTests::Base {
- protected:
- void createAccumulator() {
- _accumulator = AccumulatorLast::create();
- ASSERT_EQUALS(string("$last"), _accumulator->getOpName());
- }
- Accumulator *accumulator() { return _accumulator.get(); }
- private:
- intrusive_ptr<Accumulator> _accumulator;
- };
-
- /** The accumulator evaluates no documents. */
- class None : public Base {
- public:
- void run() {
- createAccumulator();
- // The accumulator returns no value in this case.
- ASSERT( accumulator()->getValue(false).missing() );
- }
- };
-
- /* The accumulator evaluates one document and retains its value. */
- class One : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- ASSERT_EQUALS( 5, accumulator()->getValue(false).getInt() );
- }
- };
-
-        /* The accumulator evaluates one document with the field missing and retains the missing value. */
- class Missing : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(), false);
- ASSERT_EQUALS( EOO , accumulator()->getValue(false).getType() );
- }
- };
-
- /* The accumulator evaluates two documents and retains the value in the last. */
- class Two : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- accumulator()->process(Value(7), false);
- ASSERT_EQUALS( 7, accumulator()->getValue(false).getInt() );
- }
- };
-
-        /* The accumulator evaluates two documents and retains the missing value in the last. */
- class LastMissing : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(7), false);
- accumulator()->process(Value(), false);
- ASSERT_EQUALS( EOO , accumulator()->getValue(false).getType() );
- }
- };
-
- } // namespace Last
-
- namespace Min {
-
- class Base : public AccumulatorTests::Base {
- protected:
- void createAccumulator() {
- _accumulator = AccumulatorMinMax::createMin();
- ASSERT_EQUALS(string("$min"), _accumulator->getOpName());
- }
- Accumulator *accumulator() { return _accumulator.get(); }
- private:
- intrusive_ptr<Accumulator> _accumulator;
- };
-
- /** The accumulator evaluates no documents. */
- class None : public Base {
- public:
- void run() {
- createAccumulator();
- // The accumulator returns no value in this case.
- ASSERT( accumulator()->getValue(false).missing() );
- }
- };
-
- /* The accumulator evaluates one document and retains its value. */
- class One : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- ASSERT_EQUALS( 5, accumulator()->getValue(false).getInt() );
- }
- };
-
-        /* The accumulator evaluates one document with the field missing and retains the missing value. */
- class Missing : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(), false);
- ASSERT_EQUALS( EOO , accumulator()->getValue(false).getType() );
- }
- };
-
- /* The accumulator evaluates two documents and retains the minimum value. */
- class Two : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- accumulator()->process(Value(7), false);
- ASSERT_EQUALS( 5, accumulator()->getValue(false).getInt() );
- }
- };
-
-        /* The accumulator evaluates two documents and retains the defined value. */
- class LastMissing : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(7), false);
- accumulator()->process(Value(), false);
- ASSERT_EQUALS( 7 , accumulator()->getValue(false).getInt() );
- }
- };
-
- } // namespace Min
-
- namespace Max {
-
- class Base : public AccumulatorTests::Base {
- protected:
- void createAccumulator() {
- _accumulator = AccumulatorMinMax::createMax();
- ASSERT_EQUALS(string("$max"), _accumulator->getOpName());
- }
- Accumulator *accumulator() { return _accumulator.get(); }
- private:
- intrusive_ptr<Accumulator> _accumulator;
- };
-
- /** The accumulator evaluates no documents. */
- class None : public Base {
- public:
- void run() {
- createAccumulator();
- // The accumulator returns no value in this case.
- ASSERT( accumulator()->getValue(false).missing() );
- }
- };
-
- /* The accumulator evaluates one document and retains its value. */
- class One : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- ASSERT_EQUALS( 5, accumulator()->getValue(false).getInt() );
- }
- };
-
-        /* The accumulator evaluates one document with the field missing and retains the missing value. */
- class Missing : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(), false);
- ASSERT_EQUALS( EOO, accumulator()->getValue(false).getType() );
- }
- };
-
- /* The accumulator evaluates two documents and retains the maximum value. */
- class Two : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- accumulator()->process(Value(7), false);
- ASSERT_EQUALS( 7, accumulator()->getValue(false).getInt() );
- }
- };
-
- /* The accumulator evaluates two documents and retains the defined value. */
- class LastMissing : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(7), false);
- accumulator()->process(Value(), false);
- ASSERT_EQUALS( 7, accumulator()->getValue(false).getInt() );
- }
- };
-
- } // namespace Max
-
- namespace Sum {
-
- class Base : public AccumulatorTests::Base {
- protected:
- void createAccumulator() {
- _accumulator = AccumulatorSum::create();
- ASSERT_EQUALS(string("$sum"), _accumulator->getOpName());
- }
- Accumulator *accumulator() { return _accumulator.get(); }
- private:
- intrusive_ptr<Accumulator> _accumulator;
- };
-
- /** No documents evaluated. */
- class None : public Base {
- public:
- void run() {
- createAccumulator();
- ASSERT_EQUALS( 0, accumulator()->getValue(false).getInt() );
- }
- };
-
- /** An int. */
- class OneInt : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- ASSERT_EQUALS( 5, accumulator()->getValue(false).getInt() );
- }
- };
-
- /** A long. */
- class OneLong : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(6LL), false);
- ASSERT_EQUALS( 6, accumulator()->getValue(false).getLong() );
- }
- };
-
- /** A long that cannot be expressed as an int. */
- class OneLageLong : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(60000000000LL), false);
- ASSERT_EQUALS( 60000000000LL, accumulator()->getValue(false).getLong() );
- }
- };
-
- /** A double. */
- class OneDouble : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(7.0), false);
- ASSERT_EQUALS( 7.0, accumulator()->getValue(false).getDouble() );
- }
- };
-
-        /** A non-integer valued double. */
- class OneFractionalDouble : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(7.5), false);
- ASSERT_EQUALS( 7.5, accumulator()->getValue(false).getDouble() );
- }
- };
-
-        /** A NaN double. */
- class OneNanDouble : public Base {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(numeric_limits<double>::quiet_NaN()), false);
- // NaN is unequal to itself.
- ASSERT_NOT_EQUALS( accumulator()->getValue(false).getDouble(),
- accumulator()->getValue(false).getDouble() );
- }
- };
-
- class TypeConversionBase : public Base {
- public:
- virtual ~TypeConversionBase() {
- }
- void run() {
- checkPairSum( summand1(), summand2() );
- checkPairSum( summand2(), summand1() );
- }
- protected:
- virtual Value summand1() { verify( false ); }
- virtual Value summand2() { verify( false ); }
- virtual Value expectedSum() = 0;
- void checkPairSum( Value first, Value second ) {
- createAccumulator();
- accumulator()->process(first, false);
- accumulator()->process(second, false);
- checkSum();
- }
- void checkSum() {
- Value result = accumulator()->getValue(false);
- ASSERT_EQUALS( expectedSum(), result );
- ASSERT_EQUALS( expectedSum().getType(), result.getType() );
- }
- };
-
- /** Two ints are summed. */
- class IntInt : public TypeConversionBase {
- Value summand1() { return Value(4); }
- Value summand2() { return Value(5); }
- Value expectedSum() { return Value(9); }
- };
-
- /** Two ints overflow. */
- class IntIntOverflow : public TypeConversionBase {
- Value summand1() { return Value(numeric_limits<int>::max()); }
- Value summand2() { return Value(10); }
- Value expectedSum() { return Value(numeric_limits<int>::max() + 10LL); }
- };
-
- /** Two ints negative overflow. */
- class IntIntNegativeOverflow : public TypeConversionBase {
- Value summand1() { return Value(-numeric_limits<int>::max()); }
- Value summand2() { return Value(-10); }
- Value expectedSum() { return Value(-numeric_limits<int>::max() + -10LL); }
- };
-
- /** An int and a long are summed. */
- class IntLong : public TypeConversionBase {
- Value summand1() { return Value(4); }
- Value summand2() { return Value(5LL); }
- Value expectedSum() { return Value(9LL); }
- };
-
- /** An int and a long do not trigger an int overflow. */
- class IntLongNoIntOverflow : public TypeConversionBase {
- Value summand1() { return Value(numeric_limits<int>::max()); }
- Value summand2() { return Value(1LL); }
- Value expectedSum() { return Value((long long)numeric_limits<int>::max() + 1); }
- };
-
- /** An int and a long overflow. */
- class IntLongLongOverflow : public TypeConversionBase {
- Value summand1() { return Value(1); }
- Value summand2() { return Value(numeric_limits<long long>::max()); }
- Value expectedSum() { return Value(numeric_limits<long long>::max() + 1); }
- };
-
- /** Two longs are summed. */
- class LongLong : public TypeConversionBase {
- Value summand1() { return Value(4LL); }
- Value summand2() { return Value(5LL); }
- Value expectedSum() { return Value(9LL); }
- };
-
- /** Two longs overflow. */
- class LongLongOverflow : public TypeConversionBase {
- Value summand1() { return Value(numeric_limits<long long>::max()); }
- Value summand2() { return Value(numeric_limits<long long>::max()); }
- Value expectedSum() {
- return Value(numeric_limits<long long>::max()
- + numeric_limits<long long>::max());
- }
- };
-
- /** An int and a double are summed. */
- class IntDouble : public TypeConversionBase {
- Value summand1() { return Value(4); }
- Value summand2() { return Value(5.5); }
- Value expectedSum() { return Value(9.5); }
- };
-
- /** An int and a NaN double are summed. */
- class IntNanDouble : public TypeConversionBase {
- Value summand1() { return Value(4); }
- Value summand2() { return Value(numeric_limits<double>::quiet_NaN()); }
- Value expectedSum() {
- // BSON compares NaN values as equal.
- return Value(numeric_limits<double>::quiet_NaN());
- }
- };
-
-        /** An int and a double do not trigger an int overflow. */
- class IntDoubleNoIntOverflow : public TypeConversionBase {
- Value summand1() { return Value(numeric_limits<int>::max()); }
- Value summand2() { return Value(1.0); }
- Value expectedSum() {
- return Value((long long)numeric_limits<int>::max() + 1.0);
- }
- };
-
- /** A long and a double are summed. */
- class LongDouble : public TypeConversionBase {
- Value summand1() { return Value(4LL); }
- Value summand2() { return Value(5.5); }
- Value expectedSum() { return Value(9.5); }
- };
-
- /** A long and a double do not trigger a long overflow. */
- class LongDoubleNoLongOverflow : public TypeConversionBase {
- Value summand1() { return Value(numeric_limits<long long>::max()); }
- Value summand2() { return Value(1.0); }
- Value expectedSum() {
- return Value((long long)numeric_limits<long long>::max() + 1.0);
- }
- };
-
- /** Two double values are summed. */
- class DoubleDouble : public TypeConversionBase {
- Value summand1() { return Value(2.5); }
- Value summand2() { return Value(5.5); }
- Value expectedSum() { return Value(8.0); }
- };
-
- /** Two double values overflow. */
- class DoubleDoubleOverflow : public TypeConversionBase {
- Value summand1() { return Value(numeric_limits<double>::max()); }
- Value summand2() { return Value(numeric_limits<double>::max()); }
- Value expectedSum() { return Value(numeric_limits<double>::infinity()); }
- };
-
- /** Three values, an int, a long, and a double, are summed. */
- class IntLongDouble : public TypeConversionBase {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(5), false);
- accumulator()->process(Value(99), false);
- accumulator()->process(Value(0.2), false);
- checkSum();
- }
- private:
- Value expectedSum() { return Value(104.2); }
- };
-
- /** A negative value is summed. */
- class Negative : public TypeConversionBase {
- Value summand1() { return Value(5); }
- Value summand2() { return Value(-8.8); }
- Value expectedSum() { return Value(5 - 8.8); }
- };
-
-        /** A long and a negative int are summed. */
- class LongIntNegative : public TypeConversionBase {
- Value summand1() { return Value(5LL); }
- Value summand2() { return Value(-6); }
- Value expectedSum() { return Value(-1LL); }
- };
-
- /** A null value is summed as zero. */
- class IntNull : public TypeConversionBase {
- Value summand1() { return Value(5); }
- Value summand2() { return Value(BSONNULL); }
- Value expectedSum() { return Value(5); }
- };
-
- /** An undefined value is summed as zero. */
- class IntUndefined : public TypeConversionBase {
- Value summand1() { return Value(9); }
- Value summand2() { return Value(); }
- Value expectedSum() { return Value(9); }
- };
-
- /** Two large integers do not overflow if a double is added later. */
- class NoOverflowBeforeDouble : public TypeConversionBase {
- public:
- void run() {
- createAccumulator();
- accumulator()->process(Value(numeric_limits<long long>::max()), false);
- accumulator()->process(Value(numeric_limits<long long>::max()), false);
- accumulator()->process(Value(1.0), false);
- checkSum();
- }
- private:
- Value expectedSum() {
- return Value((double)numeric_limits<long long>::max()
- + (double)numeric_limits<long long>::max());
- }
- };
-
- } // namespace Sum
-
- class All : public Suite {
- public:
- All() : Suite( "accumulator" ) {
- }
- void setupTests() {
- add<Avg::None>();
- add<Avg::OneInt>();
- add<Avg::OneLong>();
- add<Avg::OneDouble>();
- add<Avg::IntInt>();
- add<Avg::IntDouble>();
- add<Avg::IntIntNoOverflow>();
- add<Avg::LongLongOverflow>();
- add<Avg::Shard::Int>();
- add<Avg::Shard::Long>();
- add<Avg::Shard::Double>();
- add<Avg::Shard::IntIntOverflow>();
- add<Avg::Shard::IntLong>();
- add<Avg::Shard::IntDouble>();
- add<Avg::Shard::LongDouble>();
- add<Avg::Shard::IntLongDouble>();
- add<Avg::Router::OneShard>();
- add<Avg::Router::TwoShards>();
-
- add<First::None>();
- add<First::One>();
- add<First::Missing>();
- add<First::Two>();
- add<First::FirstMissing>();
-
- add<Last::None>();
- add<Last::One>();
- add<Last::Missing>();
- add<Last::Two>();
- add<Last::LastMissing>();
-
- add<Min::None>();
- add<Min::One>();
- add<Min::Missing>();
- add<Min::Two>();
- add<Min::LastMissing>();
-
- add<Max::None>();
- add<Max::One>();
- add<Max::Missing>();
- add<Max::Two>();
- add<Max::LastMissing>();
-
- add<Sum::None>();
- add<Sum::OneInt>();
- add<Sum::OneLong>();
- add<Sum::OneLageLong>();
- add<Sum::OneDouble>();
- add<Sum::OneFractionalDouble>();
- add<Sum::OneNanDouble>();
- add<Sum::IntInt>();
- add<Sum::IntIntOverflow>();
- add<Sum::IntIntNegativeOverflow>();
- add<Sum::IntLong>();
- add<Sum::IntLongNoIntOverflow>();
- add<Sum::IntLongLongOverflow>();
- add<Sum::LongLong>();
- add<Sum::LongLongOverflow>();
- add<Sum::IntDouble>();
- add<Sum::IntNanDouble>();
- add<Sum::IntDoubleNoIntOverflow>();
- add<Sum::LongDouble>();
- add<Sum::LongDoubleNoLongOverflow>();
- add<Sum::DoubleDouble>();
- add<Sum::DoubleDoubleOverflow>();
- add<Sum::IntLongDouble>();
- add<Sum::Negative>();
- add<Sum::LongIntNegative>();
- add<Sum::IntNull>();
- add<Sum::IntUndefined>();
- add<Sum::NoOverflowBeforeDouble>();
- }
- };
-
- SuiteInstance<All> myall;
-
-} // namespace AccumulatorTests
+using boost::intrusive_ptr;
+using std::numeric_limits;
+using std::string;
+
+class Base {
+protected:
+ BSONObj fromDocument(const Document& document) {
+ return document.toBson();
+ }
+ BSONObj fromValue(const Value& value) {
+ BSONObjBuilder bob;
+ value.addToBsonObj(&bob, "");
+ return bob.obj();
+ }
+ /** Check binary equality, ensuring use of the same numeric types. */
+ void assertBinaryEqual(const BSONObj& expected, const BSONObj& actual) const {
+ ASSERT_EQUALS(expected, actual);
+ ASSERT(expected.binaryEqual(actual));
+ }
+
+private:
+ intrusive_ptr<ExpressionContext> _shard;
+ intrusive_ptr<ExpressionContext> _router;
+};
+
+namespace Avg {
+
+class Base : public AccumulatorTests::Base {
+public:
+ virtual ~Base() {}
+
+protected:
+ void createAccumulator() {
+ _accumulator = AccumulatorAvg::create();
+ ASSERT_EQUALS(string("$avg"), _accumulator->getOpName());
+ }
+ Accumulator* accumulator() {
+ return _accumulator.get();
+ }
+
+private:
+ intrusive_ptr<Accumulator> _accumulator;
+};
+
+/** No documents evaluated. */
+class None : public Base {
+public:
+ void run() {
+ createAccumulator();
+ ASSERT_EQUALS(0, accumulator()->getValue(false).getDouble());
+ }
+};
+
+/** One int value is converted to double. */
+class OneInt : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(3), false);
+ ASSERT_EQUALS(3, accumulator()->getValue(false).getDouble());
+ }
+};
+
+/** One long value is converted to double. */
+class OneLong : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(-4LL), false);
+ ASSERT_EQUALS(-4, accumulator()->getValue(false).getDouble());
+ }
+};
+
+/** One double value. */
+class OneDouble : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(22.6), false);
+ ASSERT_EQUALS(22.6, accumulator()->getValue(false).getDouble());
+ }
+};
+
+/** The average of two ints is a double, even if inexact. */
+class IntInt : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(10), false);
+ accumulator()->process(Value(11), false);
+ ASSERT_EQUALS(10.5, accumulator()->getValue(false).getDouble());
+ }
+};
+
+/** The average of an int and a double is calculated as a double. */
+class IntDouble : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(10), false);
+ accumulator()->process(Value(11.0), false);
+ ASSERT_EQUALS(10.5, accumulator()->getValue(false).getDouble());
+ }
+};
+
+/** Unlike $sum, two ints do not overflow in the 'total' portion of the average. */
+class IntIntNoOverflow : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(numeric_limits<int>::max()), false);
+ accumulator()->process(Value(numeric_limits<int>::max()), false);
+ ASSERT_EQUALS(numeric_limits<int>::max(), accumulator()->getValue(false).getDouble());
+ }
+};
+
+/** Two longs do overflow in the 'total' portion of the average. */
+class LongLongOverflow : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(numeric_limits<long long>::max()), false);
+ accumulator()->process(Value(numeric_limits<long long>::max()), false);
+ ASSERT_EQUALS(
+ ((double)numeric_limits<long long>::max() + numeric_limits<long long>::max()) / 2.0,
+ accumulator()->getValue(false).getDouble());
+ }
+};
+
+namespace Shard {
+class SingleOperandBase : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(operand(), false);
+ assertBinaryEqual(expectedResult(),
+ fromDocument(accumulator()->getValue(true).getDocument()));
+ }
+
+protected:
+ virtual Value operand() = 0;
+ virtual BSONObj expectedResult() = 0;
+};
+
+/** Shard result for one integer. */
+class Int : public SingleOperandBase {
+ Value operand() {
+ return Value(3);
+ }
+ BSONObj expectedResult() {
+ return BSON("subTotal" << 3.0 << "count" << 1LL);
+ }
+};
+
+/** Shard result for one long. */
+class Long : public SingleOperandBase {
+ Value operand() {
+ return Value(5LL);
+ }
+ BSONObj expectedResult() {
+ return BSON("subTotal" << 5.0 << "count" << 1LL);
+ }
+};
+
+/** Shard result for one double. */
+class Double : public SingleOperandBase {
+ Value operand() {
+ return Value(116.0);
+ }
+ BSONObj expectedResult() {
+ return BSON("subTotal" << 116.0 << "count" << 1LL);
+ }
+};
+
+class TwoOperandBase : public Base {
+public:
+ void run() {
+ checkAvg(operand1(), operand2());
+ checkAvg(operand2(), operand1());
+ }
+
+protected:
+ virtual Value operand1() = 0;
+ virtual Value operand2() = 0;
+ virtual BSONObj expectedResult() = 0;
+
+private:
+ void checkAvg(const Value& a, const Value& b) {
+ createAccumulator();
+ accumulator()->process(a, false);
+ accumulator()->process(b, false);
+ assertBinaryEqual(expectedResult(),
+ fromDocument(accumulator()->getValue(true).getDocument()));
+ }
+};
+
+/** Shard two ints overflow. */
+class IntIntOverflow : public TwoOperandBase {
+ Value operand1() {
+ return Value(numeric_limits<int>::max());
+ }
+ Value operand2() {
+ return Value(3);
+ }
+ BSONObj expectedResult() {
+ return BSON("subTotal" << numeric_limits<int>::max() + 3.0 << "count" << 2LL);
+ }
+};
+
+/** Shard avg an int and a long. */
+class IntLong : public TwoOperandBase {
+ Value operand1() {
+ return Value(5);
+ }
+ Value operand2() {
+ return Value(3LL);
+ }
+ BSONObj expectedResult() {
+ return BSON("subTotal" << 8.0 << "count" << 2LL);
+ }
+};
+
+/** Shard avg an int and a double. */
+class IntDouble : public TwoOperandBase {
+ Value operand1() {
+ return Value(5);
+ }
+ Value operand2() {
+ return Value(6.2);
+ }
+ BSONObj expectedResult() {
+ return BSON("subTotal" << 11.2 << "count" << 2LL);
+ }
+};
+
+/** Shard avg a long and a double. */
+class LongDouble : public TwoOperandBase {
+ Value operand1() {
+ return Value(5LL);
+ }
+ Value operand2() {
+ return Value(1.0);
+ }
+ BSONObj expectedResult() {
+ return BSON("subTotal" << 6.0 << "count" << 2LL);
+ }
+};
+
+/** Shard avg an int, long, and double. */
+class IntLongDouble : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(1), false);
+ accumulator()->process(Value(2LL), false);
+ accumulator()->process(Value(4.0), false);
+ assertBinaryEqual(BSON("subTotal" << 7.0 << "count" << 3LL),
+ fromDocument(accumulator()->getValue(true).getDocument()));
+ }
+};
+
+} // namespace Shard
+
+namespace Router {
+/** Router result from one shard. */
+class OneShard : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(DOC("subTotal" << 3.0 << "count" << 2LL)), true);
+ assertBinaryEqual(BSON("" << 3.0 / 2), fromValue(accumulator()->getValue(false)));
+ }
+};
+
+/** Router result from two shards. */
+class TwoShards : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(DOC("subTotal" << 6.0 << "count" << 1LL)), true);
+ accumulator()->process(Value(DOC("subTotal" << 5.0 << "count" << 2LL)), true);
+ assertBinaryEqual(BSON("" << 11.0 / 3), fromValue(accumulator()->getValue(false)));
+ }
+};
+
+} // namespace Router
+
+} // namespace Avg
+
+namespace First {
+
+class Base : public AccumulatorTests::Base {
+protected:
+ void createAccumulator() {
+ _accumulator = AccumulatorFirst::create();
+ ASSERT_EQUALS(string("$first"), _accumulator->getOpName());
+ }
+ Accumulator* accumulator() {
+ return _accumulator.get();
+ }
+
+private:
+ intrusive_ptr<Accumulator> _accumulator;
+};
+
+/** The accumulator evaluates no documents. */
+class None : public Base {
+public:
+ void run() {
+ createAccumulator();
+ // The accumulator returns no value in this case.
+ ASSERT(accumulator()->getValue(false).missing());
+ }
+};
+
+/* The accumulator evaluates one document and retains its value. */
+class One : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ ASSERT_EQUALS(5, accumulator()->getValue(false).getInt());
+ }
+};
+
+/* The accumulator evaluates one document with the field missing and returns a missing value. */
+class Missing : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(), false);
+ ASSERT_EQUALS(EOO, accumulator()->getValue(false).getType());
+ }
+};
+
+/* The accumulator evaluates two documents and retains the value in the first. */
+class Two : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ accumulator()->process(Value(7), false);
+ ASSERT_EQUALS(5, accumulator()->getValue(false).getInt());
+ }
+};
+
+/* The accumulator evaluates two documents and retains the missing value in the first. */
+class FirstMissing : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(), false);
+ accumulator()->process(Value(7), false);
+ ASSERT_EQUALS(EOO, accumulator()->getValue(false).getType());
+ }
+};
+
+} // namespace First
+
+namespace Last {
+
+class Base : public AccumulatorTests::Base {
+protected:
+ void createAccumulator() {
+ _accumulator = AccumulatorLast::create();
+ ASSERT_EQUALS(string("$last"), _accumulator->getOpName());
+ }
+ Accumulator* accumulator() {
+ return _accumulator.get();
+ }
+
+private:
+ intrusive_ptr<Accumulator> _accumulator;
+};
+
+/** The accumulator evaluates no documents. */
+class None : public Base {
+public:
+ void run() {
+ createAccumulator();
+ // The accumulator returns no value in this case.
+ ASSERT(accumulator()->getValue(false).missing());
+ }
+};
+
+/* The accumulator evaluates one document and retains its value. */
+class One : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ ASSERT_EQUALS(5, accumulator()->getValue(false).getInt());
+ }
+};
+
+/* The accumulator evaluates one document with the field missing and retains the missing value. */
+class Missing : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(), false);
+ ASSERT_EQUALS(EOO, accumulator()->getValue(false).getType());
+ }
+};
+
+/* The accumulator evaluates two documents and retains the value in the last. */
+class Two : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ accumulator()->process(Value(7), false);
+ ASSERT_EQUALS(7, accumulator()->getValue(false).getInt());
+ }
+};
+
+/* The accumulator evaluates two documents and retains the missing value in the last. */
+class LastMissing : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(7), false);
+ accumulator()->process(Value(), false);
+ ASSERT_EQUALS(EOO, accumulator()->getValue(false).getType());
+ }
+};
+
+} // namespace Last
+
+namespace Min {
+
+class Base : public AccumulatorTests::Base {
+protected:
+ void createAccumulator() {
+ _accumulator = AccumulatorMinMax::createMin();
+ ASSERT_EQUALS(string("$min"), _accumulator->getOpName());
+ }
+ Accumulator* accumulator() {
+ return _accumulator.get();
+ }
+
+private:
+ intrusive_ptr<Accumulator> _accumulator;
+};
+
+/** The accumulator evaluates no documents. */
+class None : public Base {
+public:
+ void run() {
+ createAccumulator();
+ // The accumulator returns no value in this case.
+ ASSERT(accumulator()->getValue(false).missing());
+ }
+};
+
+/* The accumulator evaluates one document and retains its value. */
+class One : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ ASSERT_EQUALS(5, accumulator()->getValue(false).getInt());
+ }
+};
+
+/* The accumulator evaluates one document with the field missing and retains the missing value. */
+class Missing : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(), false);
+ ASSERT_EQUALS(EOO, accumulator()->getValue(false).getType());
+ }
+};
+
+/* The accumulator evaluates two documents and retains the minimum value. */
+class Two : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ accumulator()->process(Value(7), false);
+ ASSERT_EQUALS(5, accumulator()->getValue(false).getInt());
+ }
+};
+
+/* The accumulator evaluates two documents and retains the defined value. */
+class LastMissing : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(7), false);
+ accumulator()->process(Value(), false);
+ ASSERT_EQUALS(7, accumulator()->getValue(false).getInt());
+ }
+};
+
+} // namespace Min
+
+namespace Max {
+
+class Base : public AccumulatorTests::Base {
+protected:
+ void createAccumulator() {
+ _accumulator = AccumulatorMinMax::createMax();
+ ASSERT_EQUALS(string("$max"), _accumulator->getOpName());
+ }
+ Accumulator* accumulator() {
+ return _accumulator.get();
+ }
+
+private:
+ intrusive_ptr<Accumulator> _accumulator;
+};
+
+/** The accumulator evaluates no documents. */
+class None : public Base {
+public:
+ void run() {
+ createAccumulator();
+ // The accumulator returns no value in this case.
+ ASSERT(accumulator()->getValue(false).missing());
+ }
+};
+
+/* The accumulator evaluates one document and retains its value. */
+class One : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ ASSERT_EQUALS(5, accumulator()->getValue(false).getInt());
+ }
+};
+
+/* The accumulator evaluates one document with the field missing and retains the missing value. */
+class Missing : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(), false);
+ ASSERT_EQUALS(EOO, accumulator()->getValue(false).getType());
+ }
+};
+
+/* The accumulator evaluates two documents and retains the maximum value. */
+class Two : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ accumulator()->process(Value(7), false);
+ ASSERT_EQUALS(7, accumulator()->getValue(false).getInt());
+ }
+};
+
+/* The accumulator evaluates two documents and retains the defined value. */
+class LastMissing : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(7), false);
+ accumulator()->process(Value(), false);
+ ASSERT_EQUALS(7, accumulator()->getValue(false).getInt());
+ }
+};
+
+} // namespace Max
+
+namespace Sum {
+
+class Base : public AccumulatorTests::Base {
+protected:
+ void createAccumulator() {
+ _accumulator = AccumulatorSum::create();
+ ASSERT_EQUALS(string("$sum"), _accumulator->getOpName());
+ }
+ Accumulator* accumulator() {
+ return _accumulator.get();
+ }
+
+private:
+ intrusive_ptr<Accumulator> _accumulator;
+};
+
+/** No documents evaluated. */
+class None : public Base {
+public:
+ void run() {
+ createAccumulator();
+ ASSERT_EQUALS(0, accumulator()->getValue(false).getInt());
+ }
+};
+
+/** An int. */
+class OneInt : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ ASSERT_EQUALS(5, accumulator()->getValue(false).getInt());
+ }
+};
+
+/** A long. */
+class OneLong : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(6LL), false);
+ ASSERT_EQUALS(6, accumulator()->getValue(false).getLong());
+ }
+};
+
+/** A long that cannot be expressed as an int. */
+class OneLageLong : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(60000000000LL), false);
+ ASSERT_EQUALS(60000000000LL, accumulator()->getValue(false).getLong());
+ }
+};
+
+/** A double. */
+class OneDouble : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(7.0), false);
+ ASSERT_EQUALS(7.0, accumulator()->getValue(false).getDouble());
+ }
+};
+
+/** A non-integer valued double. */
+class OneFractionalDouble : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(7.5), false);
+ ASSERT_EQUALS(7.5, accumulator()->getValue(false).getDouble());
+ }
+};
+
+/** A NaN double. */
+class OneNanDouble : public Base {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(numeric_limits<double>::quiet_NaN()), false);
+ // NaN is unequal to itself.
+ ASSERT_NOT_EQUALS(accumulator()->getValue(false).getDouble(),
+ accumulator()->getValue(false).getDouble());
+ }
+};
+
+class TypeConversionBase : public Base {
+public:
+ virtual ~TypeConversionBase() {}
+ void run() {
+ checkPairSum(summand1(), summand2());
+ checkPairSum(summand2(), summand1());
+ }
+
+protected:
+ virtual Value summand1() {
+ verify(false);
+ }
+ virtual Value summand2() {
+ verify(false);
+ }
+ virtual Value expectedSum() = 0;
+ void checkPairSum(Value first, Value second) {
+ createAccumulator();
+ accumulator()->process(first, false);
+ accumulator()->process(second, false);
+ checkSum();
+ }
+ void checkSum() {
+ Value result = accumulator()->getValue(false);
+ ASSERT_EQUALS(expectedSum(), result);
+ ASSERT_EQUALS(expectedSum().getType(), result.getType());
+ }
+};
+
+/** Two ints are summed. */
+class IntInt : public TypeConversionBase {
+ Value summand1() {
+ return Value(4);
+ }
+ Value summand2() {
+ return Value(5);
+ }
+ Value expectedSum() {
+ return Value(9);
+ }
+};
+
+/** Two ints overflow. */
+class IntIntOverflow : public TypeConversionBase {
+ Value summand1() {
+ return Value(numeric_limits<int>::max());
+ }
+ Value summand2() {
+ return Value(10);
+ }
+ Value expectedSum() {
+ return Value(numeric_limits<int>::max() + 10LL);
+ }
+};
+
+/** Two ints negative overflow. */
+class IntIntNegativeOverflow : public TypeConversionBase {
+ Value summand1() {
+ return Value(-numeric_limits<int>::max());
+ }
+ Value summand2() {
+ return Value(-10);
+ }
+ Value expectedSum() {
+ return Value(-numeric_limits<int>::max() + -10LL);
+ }
+};
+
+/** An int and a long are summed. */
+class IntLong : public TypeConversionBase {
+ Value summand1() {
+ return Value(4);
+ }
+ Value summand2() {
+ return Value(5LL);
+ }
+ Value expectedSum() {
+ return Value(9LL);
+ }
+};
+
+/** An int and a long do not trigger an int overflow. */
+class IntLongNoIntOverflow : public TypeConversionBase {
+ Value summand1() {
+ return Value(numeric_limits<int>::max());
+ }
+ Value summand2() {
+ return Value(1LL);
+ }
+ Value expectedSum() {
+ return Value((long long)numeric_limits<int>::max() + 1);
+ }
+};
+
+/** An int and a long overflow. */
+class IntLongLongOverflow : public TypeConversionBase {
+ Value summand1() {
+ return Value(1);
+ }
+ Value summand2() {
+ return Value(numeric_limits<long long>::max());
+ }
+ Value expectedSum() {
+ return Value(numeric_limits<long long>::max() + 1);
+ }
+};
+
+/** Two longs are summed. */
+class LongLong : public TypeConversionBase {
+ Value summand1() {
+ return Value(4LL);
+ }
+ Value summand2() {
+ return Value(5LL);
+ }
+ Value expectedSum() {
+ return Value(9LL);
+ }
+};
+
+/** Two longs overflow. */
+class LongLongOverflow : public TypeConversionBase {
+ Value summand1() {
+ return Value(numeric_limits<long long>::max());
+ }
+ Value summand2() {
+ return Value(numeric_limits<long long>::max());
+ }
+ Value expectedSum() {
+ return Value(numeric_limits<long long>::max() + numeric_limits<long long>::max());
+ }
+};
+
+/** An int and a double are summed. */
+class IntDouble : public TypeConversionBase {
+ Value summand1() {
+ return Value(4);
+ }
+ Value summand2() {
+ return Value(5.5);
+ }
+ Value expectedSum() {
+ return Value(9.5);
+ }
+};
+
+/** An int and a NaN double are summed. */
+class IntNanDouble : public TypeConversionBase {
+ Value summand1() {
+ return Value(4);
+ }
+ Value summand2() {
+ return Value(numeric_limits<double>::quiet_NaN());
+ }
+ Value expectedSum() {
+ // BSON compares NaN values as equal.
+ return Value(numeric_limits<double>::quiet_NaN());
+ }
+};
+
+/** An int and a double do not trigger an int overflow. */
+class IntDoubleNoIntOverflow : public TypeConversionBase {
+ Value summand1() {
+ return Value(numeric_limits<int>::max());
+ }
+ Value summand2() {
+ return Value(1.0);
+ }
+ Value expectedSum() {
+ return Value((long long)numeric_limits<int>::max() + 1.0);
+ }
+};
+
+/** A long and a double are summed. */
+class LongDouble : public TypeConversionBase {
+ Value summand1() {
+ return Value(4LL);
+ }
+ Value summand2() {
+ return Value(5.5);
+ }
+ Value expectedSum() {
+ return Value(9.5);
+ }
+};
+
+/** A long and a double do not trigger a long overflow. */
+class LongDoubleNoLongOverflow : public TypeConversionBase {
+ Value summand1() {
+ return Value(numeric_limits<long long>::max());
+ }
+ Value summand2() {
+ return Value(1.0);
+ }
+ Value expectedSum() {
+ return Value((long long)numeric_limits<long long>::max() + 1.0);
+ }
+};
+
+/** Two double values are summed. */
+class DoubleDouble : public TypeConversionBase {
+ Value summand1() {
+ return Value(2.5);
+ }
+ Value summand2() {
+ return Value(5.5);
+ }
+ Value expectedSum() {
+ return Value(8.0);
+ }
+};
+
+/** Two double values overflow. */
+class DoubleDoubleOverflow : public TypeConversionBase {
+ Value summand1() {
+ return Value(numeric_limits<double>::max());
+ }
+ Value summand2() {
+ return Value(numeric_limits<double>::max());
+ }
+ Value expectedSum() {
+ return Value(numeric_limits<double>::infinity());
+ }
+};
+
+/** Three values, an int, a long, and a double, are summed. */
+class IntLongDouble : public TypeConversionBase {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(5), false);
+ accumulator()->process(Value(99), false);
+ accumulator()->process(Value(0.2), false);
+ checkSum();
+ }
+
+private:
+ Value expectedSum() {
+ return Value(104.2);
+ }
+};
+
+/** A negative value is summed. */
+class Negative : public TypeConversionBase {
+ Value summand1() {
+ return Value(5);
+ }
+ Value summand2() {
+ return Value(-8.8);
+ }
+ Value expectedSum() {
+ return Value(5 - 8.8);
+ }
+};
+
+/** A long and a negative int are summed. */
+class LongIntNegative : public TypeConversionBase {
+ Value summand1() {
+ return Value(5LL);
+ }
+ Value summand2() {
+ return Value(-6);
+ }
+ Value expectedSum() {
+ return Value(-1LL);
+ }
+};
+
+/** A null value is summed as zero. */
+class IntNull : public TypeConversionBase {
+ Value summand1() {
+ return Value(5);
+ }
+ Value summand2() {
+ return Value(BSONNULL);
+ }
+ Value expectedSum() {
+ return Value(5);
+ }
+};
+
+/** An undefined value is summed as zero. */
+class IntUndefined : public TypeConversionBase {
+ Value summand1() {
+ return Value(9);
+ }
+ Value summand2() {
+ return Value();
+ }
+ Value expectedSum() {
+ return Value(9);
+ }
+};
+
+/** Two large integers do not overflow if a double is added later. */
+class NoOverflowBeforeDouble : public TypeConversionBase {
+public:
+ void run() {
+ createAccumulator();
+ accumulator()->process(Value(numeric_limits<long long>::max()), false);
+ accumulator()->process(Value(numeric_limits<long long>::max()), false);
+ accumulator()->process(Value(1.0), false);
+ checkSum();
+ }
+
+private:
+ Value expectedSum() {
+ return Value((double)numeric_limits<long long>::max() +
+ (double)numeric_limits<long long>::max());
+ }
+};
+
+} // namespace Sum
+
+class All : public Suite {
+public:
+ All() : Suite("accumulator") {}
+ void setupTests() {
+ add<Avg::None>();
+ add<Avg::OneInt>();
+ add<Avg::OneLong>();
+ add<Avg::OneDouble>();
+ add<Avg::IntInt>();
+ add<Avg::IntDouble>();
+ add<Avg::IntIntNoOverflow>();
+ add<Avg::LongLongOverflow>();
+ add<Avg::Shard::Int>();
+ add<Avg::Shard::Long>();
+ add<Avg::Shard::Double>();
+ add<Avg::Shard::IntIntOverflow>();
+ add<Avg::Shard::IntLong>();
+ add<Avg::Shard::IntDouble>();
+ add<Avg::Shard::LongDouble>();
+ add<Avg::Shard::IntLongDouble>();
+ add<Avg::Router::OneShard>();
+ add<Avg::Router::TwoShards>();
+
+ add<First::None>();
+ add<First::One>();
+ add<First::Missing>();
+ add<First::Two>();
+ add<First::FirstMissing>();
+
+ add<Last::None>();
+ add<Last::One>();
+ add<Last::Missing>();
+ add<Last::Two>();
+ add<Last::LastMissing>();
+
+ add<Min::None>();
+ add<Min::One>();
+ add<Min::Missing>();
+ add<Min::Two>();
+ add<Min::LastMissing>();
+
+ add<Max::None>();
+ add<Max::One>();
+ add<Max::Missing>();
+ add<Max::Two>();
+ add<Max::LastMissing>();
+
+ add<Sum::None>();
+ add<Sum::OneInt>();
+ add<Sum::OneLong>();
+ add<Sum::OneLageLong>();
+ add<Sum::OneDouble>();
+ add<Sum::OneFractionalDouble>();
+ add<Sum::OneNanDouble>();
+ add<Sum::IntInt>();
+ add<Sum::IntIntOverflow>();
+ add<Sum::IntIntNegativeOverflow>();
+ add<Sum::IntLong>();
+ add<Sum::IntLongNoIntOverflow>();
+ add<Sum::IntLongLongOverflow>();
+ add<Sum::LongLong>();
+ add<Sum::LongLongOverflow>();
+ add<Sum::IntDouble>();
+ add<Sum::IntNanDouble>();
+ add<Sum::IntDoubleNoIntOverflow>();
+ add<Sum::LongDouble>();
+ add<Sum::LongDoubleNoLongOverflow>();
+ add<Sum::DoubleDouble>();
+ add<Sum::DoubleDoubleOverflow>();
+ add<Sum::IntLongDouble>();
+ add<Sum::Negative>();
+ add<Sum::LongIntNegative>();
+ add<Sum::IntNull>();
+ add<Sum::IntUndefined>();
+ add<Sum::NoOverflowBeforeDouble>();
+ }
+};
+
+SuiteInstance<All> myall;
+
+} // namespace AccumulatorTests
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
index f31b8021fe5..4c7762e3d3a 100644
--- a/src/mongo/dbtests/basictests.cpp
+++ b/src/mongo/dbtests/basictests.cpp
@@ -49,353 +49,351 @@
namespace BasicTests {
- using boost::scoped_ptr;
- using boost::shared_ptr;
- using std::cout;
- using std::dec;
- using std::endl;
- using std::hex;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- class Rarely {
- public:
- void run() {
- int first = 0;
- int second = 0;
- int third = 0;
- for( int i = 0; i < 128; ++i ) {
- incRarely( first );
- incRarely2( second );
- ONCE ++third;
- }
- ASSERT_EQUALS( 1, first );
- ASSERT_EQUALS( 1, second );
- ASSERT_EQUALS( 1, third );
- }
- private:
- void incRarely( int &c ) {
- RARELY ++c;
- }
- void incRarely2( int &c ) {
- RARELY ++c;
+using boost::scoped_ptr;
+using boost::shared_ptr;
+using std::cout;
+using std::dec;
+using std::endl;
+using std::hex;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+class Rarely {
+public:
+ void run() {
+ int first = 0;
+ int second = 0;
+ int third = 0;
+ for (int i = 0; i < 128; ++i) {
+ incRarely(first);
+ incRarely2(second);
+            ONCE ++third;
}
- };
-
- class Base64Tests {
- public:
+ ASSERT_EQUALS(1, first);
+ ASSERT_EQUALS(1, second);
+ ASSERT_EQUALS(1, third);
+ }
- void roundTrip( string s ) {
- ASSERT_EQUALS( s , base64::decode( base64::encode( s ) ) );
- }
+private:
+ void incRarely(int& c) {
+        RARELY ++c;
+ }
+ void incRarely2(int& c) {
+        RARELY ++c;
+ }
+};
- void roundTrip( const unsigned char * _data , int len ) {
- const char *data = (const char *) _data;
- string s = base64::encode( data , len );
- string out = base64::decode( s );
- ASSERT_EQUALS( out.size() , static_cast<size_t>(len) );
- bool broke = false;
- for ( int i=0; i<len; i++ ) {
- if ( data[i] != out[i] )
- broke = true;
- }
- if ( ! broke )
- return;
-
- cout << s << endl;
- for ( int i=0; i<len; i++ )
- cout << hex << ( data[i] & 0xFF ) << dec << " ";
- cout << endl;
- for ( int i=0; i<len; i++ )
- cout << hex << ( out[i] & 0xFF ) << dec << " ";
- cout << endl;
-
- ASSERT(0);
- }
+class Base64Tests {
+public:
+ void roundTrip(string s) {
+ ASSERT_EQUALS(s, base64::decode(base64::encode(s)));
+ }
- void run() {
-
- ASSERT_EQUALS( "ZWxp" , base64::encode( "eli" , 3 ) );
- ASSERT_EQUALS( "ZWxpb3Rz" , base64::encode( "eliots" , 6 ) );
- ASSERT_EQUALS( "ZWxpb3Rz" , base64::encode( "eliots" ) );
-
- ASSERT_EQUALS( "ZQ==" , base64::encode( "e" , 1 ) );
- ASSERT_EQUALS( "ZWw=" , base64::encode( "el" , 2 ) );
-
- roundTrip( "e" );
- roundTrip( "el" );
- roundTrip( "eli" );
- roundTrip( "elio" );
- roundTrip( "eliot" );
- roundTrip( "eliots" );
- roundTrip( "eliotsz" );
-
- unsigned char z[] = { 0x1 , 0x2 , 0x3 , 0x4 };
- roundTrip( z , 4 );
-
- unsigned char y[] = {
- 0x01, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
- 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
- 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
- 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
- 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF
- };
- roundTrip( y , 4 );
- roundTrip( y , 40 );
+ void roundTrip(const unsigned char* _data, int len) {
+ const char* data = (const char*)_data;
+ string s = base64::encode(data, len);
+ string out = base64::decode(s);
+ ASSERT_EQUALS(out.size(), static_cast<size_t>(len));
+ bool broke = false;
+ for (int i = 0; i < len; i++) {
+ if (data[i] != out[i])
+ broke = true;
}
- };
-
- namespace stringbuildertests {
-#define SBTGB(x) ss << (x); sb << (x);
-
- class Base {
- virtual void pop() = 0;
+ if (!broke)
+ return;
+
+ cout << s << endl;
+ for (int i = 0; i < len; i++)
+ cout << hex << (data[i] & 0xFF) << dec << " ";
+ cout << endl;
+ for (int i = 0; i < len; i++)
+ cout << hex << (out[i] & 0xFF) << dec << " ";
+ cout << endl;
+
+ ASSERT(0);
+ }
- public:
- Base() {}
- virtual ~Base() {}
+ void run() {
+ ASSERT_EQUALS("ZWxp", base64::encode("eli", 3));
+ ASSERT_EQUALS("ZWxpb3Rz", base64::encode("eliots", 6));
+ ASSERT_EQUALS("ZWxpb3Rz", base64::encode("eliots"));
+
+ ASSERT_EQUALS("ZQ==", base64::encode("e", 1));
+ ASSERT_EQUALS("ZWw=", base64::encode("el", 2));
+
+ roundTrip("e");
+ roundTrip("el");
+ roundTrip("eli");
+ roundTrip("elio");
+ roundTrip("eliot");
+ roundTrip("eliots");
+ roundTrip("eliotsz");
+
+ unsigned char z[] = {0x1, 0x2, 0x3, 0x4};
+ roundTrip(z, 4);
+
+ unsigned char y[] = {0x01, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
+ 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
+ 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
+ 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
+ 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF};
+ roundTrip(y, 4);
+ roundTrip(y, 40);
+ }
+};
- void run() {
- pop();
- ASSERT_EQUALS( ss.str() , sb.str() );
- }
+namespace stringbuildertests {
+#define SBTGB(x) \
+ ss << (x); \
+ sb << (x);
- stringstream ss;
- StringBuilder sb;
- };
+class Base {
+ virtual void pop() = 0;
- class simple1 : public Base {
- void pop() {
- SBTGB(1);
- SBTGB("yo");
- SBTGB(2);
- }
- };
-
- class simple2 : public Base {
- void pop() {
- SBTGB(1);
- SBTGB("yo");
- SBTGB(2);
- SBTGB( 12123123123LL );
- SBTGB( "xxx" );
- SBTGB( 5.4 );
- SBTGB( 5.4312 );
- SBTGB( "yyy" );
- SBTGB( (short)5 );
- SBTGB( (short)(1231231231231LL) );
- }
- };
-
- class reset1 {
- public:
- void run() {
- StringBuilder sb;
- sb << "1" << "abc" << "5.17";
- ASSERT_EQUALS( "1abc5.17" , sb.str() );
- ASSERT_EQUALS( "1abc5.17" , sb.str() );
- sb.reset();
- ASSERT_EQUALS( "" , sb.str() );
- sb << "999";
- ASSERT_EQUALS( "999" , sb.str() );
- }
- };
-
- class reset2 {
- public:
- void run() {
- StringBuilder sb;
- sb << "1" << "abc" << "5.17";
- ASSERT_EQUALS( "1abc5.17" , sb.str() );
- ASSERT_EQUALS( "1abc5.17" , sb.str() );
- sb.reset(1);
- ASSERT_EQUALS( "" , sb.str() );
- sb << "999";
- ASSERT_EQUALS( "999" , sb.str() );
- }
- };
+public:
+ Base() {}
+ virtual ~Base() {}
+ void run() {
+ pop();
+ ASSERT_EQUALS(ss.str(), sb.str());
}
- class sleeptest {
- public:
-
- void run() {
- Timer t;
- int matches = 0;
- for( int p = 0; p < 3; p++ ) {
- sleepsecs( 1 );
- int sec = (t.millis() + 2)/1000;
- if( sec == 1 )
- matches++;
- else
- mongo::unittest::log() << "temp millis: " << t.millis() << endl;
- ASSERT( sec >= 0 && sec <= 2 );
- t.reset();
- }
- if ( matches < 2 )
- mongo::unittest::log() << "matches:" << matches << endl;
- ASSERT( matches >= 2 );
-
- sleepmicros( 1527123 );
- ASSERT( t.micros() > 1000000 );
- ASSERT( t.micros() < 2000000 );
+ stringstream ss;
+ StringBuilder sb;
+};
+class simple1 : public Base {
+ void pop() {
+ SBTGB(1);
+ SBTGB("yo");
+ SBTGB(2);
+ }
+};
+
+class simple2 : public Base {
+ void pop() {
+ SBTGB(1);
+ SBTGB("yo");
+ SBTGB(2);
+ SBTGB(12123123123LL);
+ SBTGB("xxx");
+ SBTGB(5.4);
+ SBTGB(5.4312);
+ SBTGB("yyy");
+ SBTGB((short)5);
+ SBTGB((short)(1231231231231LL));
+ }
+};
+
+class reset1 {
+public:
+ void run() {
+ StringBuilder sb;
+ sb << "1"
+ << "abc"
+ << "5.17";
+ ASSERT_EQUALS("1abc5.17", sb.str());
+ ASSERT_EQUALS("1abc5.17", sb.str());
+ sb.reset();
+ ASSERT_EQUALS("", sb.str());
+ sb << "999";
+ ASSERT_EQUALS("999", sb.str());
+ }
+};
+
+class reset2 {
+public:
+ void run() {
+ StringBuilder sb;
+ sb << "1"
+ << "abc"
+ << "5.17";
+ ASSERT_EQUALS("1abc5.17", sb.str());
+ ASSERT_EQUALS("1abc5.17", sb.str());
+ sb.reset(1);
+ ASSERT_EQUALS("", sb.str());
+ sb << "999";
+ ASSERT_EQUALS("999", sb.str());
+ }
+};
+}  // namespace stringbuildertests
+
+class sleeptest {
+public:
+ void run() {
+ Timer t;
+ int matches = 0;
+ for (int p = 0; p < 3; p++) {
+ sleepsecs(1);
+ int sec = (t.millis() + 2) / 1000;
+ if (sec == 1)
+ matches++;
+ else
+ mongo::unittest::log() << "temp millis: " << t.millis() << endl;
+ ASSERT(sec >= 0 && sec <= 2);
t.reset();
- sleepmillis( 1727 );
- ASSERT( t.millis() >= 1000 );
- ASSERT( t.millis() <= 2500 );
-
+ }
+ if (matches < 2)
+ mongo::unittest::log() << "matches:" << matches << endl;
+ ASSERT(matches >= 2);
+
+ sleepmicros(1527123);
+ ASSERT(t.micros() > 1000000);
+ ASSERT(t.micros() < 2000000);
+
+ t.reset();
+ sleepmillis(1727);
+ ASSERT(t.millis() >= 1000);
+ ASSERT(t.millis() <= 2500);
+
+ {
+ int total = 1200;
+ int ms = 2;
+ t.reset();
+ for (int i = 0; i < (total / ms); i++) {
+ sleepmillis(ms);
+ }
{
- int total = 1200;
- int ms = 2;
- t.reset();
- for ( int i=0; i<(total/ms); i++ ) {
- sleepmillis( ms );
- }
- {
- int x = t.millis();
- if ( x < 1000 || x > 2500 ) {
- cout << "sleeptest finds sleep accuracy to be not great. x: " << x << endl;
- ASSERT( x >= 1000 );
- ASSERT( x <= 20000 );
- }
+ int x = t.millis();
+ if (x < 1000 || x > 2500) {
+ cout << "sleeptest finds sleep accuracy to be not great. x: " << x << endl;
+ ASSERT(x >= 1000);
+ ASSERT(x <= 20000);
}
}
+ }
#ifdef __linux__
+ {
+ int total = 1200;
+ int micros = 100;
+ t.reset();
+ int numSleeps = 1000 * (total / micros);
+ for (int i = 0; i < numSleeps; i++) {
+ sleepmicros(micros);
+ }
{
- int total = 1200;
- int micros = 100;
- t.reset();
- int numSleeps = 1000*(total/micros);
- for ( int i=0; i<numSleeps; i++ ) {
- sleepmicros( micros );
- }
- {
- int y = t.millis();
- if ( y < 1000 || y > 2500 ) {
- cout << "sleeptest y: " << y << endl;
- ASSERT( y >= 1000 );
- /* ASSERT( y <= 100000 ); */
- }
+ int y = t.millis();
+ if (y < 1000 || y > 2500) {
+ cout << "sleeptest y: " << y << endl;
+ ASSERT(y >= 1000);
+ /* ASSERT( y <= 100000 ); */
}
}
-#endif
-
}
+#endif
+ }
+};
+
+class SleepBackoffTest {
+public:
+ void run() {
+ int maxSleepTimeMillis = 1000;
+
+ Backoff backoff(maxSleepTimeMillis, maxSleepTimeMillis * 2);
+
+ // Double previous sleep duration
+ ASSERT_EQUALS(backoff.getNextSleepMillis(0, 0, 0), 1);
+ ASSERT_EQUALS(backoff.getNextSleepMillis(2, 0, 0), 4);
+ ASSERT_EQUALS(backoff.getNextSleepMillis(256, 0, 0), 512);
+
+ // Make sure our backoff increases to the maximum value
+ ASSERT_EQUALS(backoff.getNextSleepMillis(maxSleepTimeMillis - 200, 0, 0),
+ maxSleepTimeMillis);
+ ASSERT_EQUALS(backoff.getNextSleepMillis(maxSleepTimeMillis * 2, 0, 0), maxSleepTimeMillis);
+
+ // Make sure that our backoff gets reset if we wait much longer than the maximum wait
+ unsigned long long resetAfterMillis = maxSleepTimeMillis + maxSleepTimeMillis * 2;
+ ASSERT_EQUALS(backoff.getNextSleepMillis(20, resetAfterMillis, 0), 40); // no reset here
+ ASSERT_EQUALS(backoff.getNextSleepMillis(20, resetAfterMillis + 1, 0),
+ 1); // reset expected
+ }
+};
- };
-
- class SleepBackoffTest {
- public:
- void run() {
-
- int maxSleepTimeMillis = 1000;
-
- Backoff backoff( maxSleepTimeMillis, maxSleepTimeMillis * 2 );
-
- // Double previous sleep duration
- ASSERT_EQUALS( backoff.getNextSleepMillis( 0, 0, 0 ), 1 );
- ASSERT_EQUALS( backoff.getNextSleepMillis( 2, 0, 0 ), 4 );
- ASSERT_EQUALS( backoff.getNextSleepMillis( 256, 0, 0 ), 512 );
-
- // Make sure our backoff increases to the maximum value
- ASSERT_EQUALS( backoff.getNextSleepMillis( maxSleepTimeMillis - 200, 0, 0 ), maxSleepTimeMillis );
- ASSERT_EQUALS( backoff.getNextSleepMillis( maxSleepTimeMillis * 2, 0, 0 ), maxSleepTimeMillis );
-
- // Make sure that our backoff gets reset if we wait much longer than the maximum wait
- unsigned long long resetAfterMillis = maxSleepTimeMillis + maxSleepTimeMillis * 2;
- ASSERT_EQUALS( backoff.getNextSleepMillis( 20, resetAfterMillis, 0), 40 ); // no reset here
- ASSERT_EQUALS( backoff.getNextSleepMillis( 20, resetAfterMillis + 1, 0), 1 ); // reset expected
-
- }
- };
-
- class AssertTests {
- public:
-
- int x;
+class AssertTests {
+public:
+ int x;
- AssertTests() {
- x = 0;
- }
+ AssertTests() {
+ x = 0;
+ }
- string foo() {
- x++;
- return "";
+ string foo() {
+ x++;
+ return "";
+ }
+ void run() {
+ uassert(-1, foo(), 1);
+ if (x != 0) {
+ ASSERT_EQUALS(0, x);
}
- void run() {
- uassert( -1 , foo() , 1 );
- if( x != 0 ) {
- ASSERT_EQUALS( 0 , x );
- }
- try {
- uassert( -1 , foo() , 0 );
- }
- catch ( ... ) {}
- ASSERT_EQUALS( 1 , x );
+ try {
+ uassert(-1, foo(), 0);
+ } catch (...) {
}
- };
+ ASSERT_EQUALS(1, x);
+ }
+};
- class ThreadSafeStringTest {
- public:
- void run() {
- ThreadSafeString s;
- s = "eliot";
- ASSERT_EQUALS( s.toString() , "eliot" );
- ASSERT( s.toString() != "eliot2" );
+class ThreadSafeStringTest {
+public:
+ void run() {
+ ThreadSafeString s;
+ s = "eliot";
+ ASSERT_EQUALS(s.toString(), "eliot");
+ ASSERT(s.toString() != "eliot2");
- ThreadSafeString s2;
- s2 = s.toString().c_str();
- ASSERT_EQUALS( s2.toString() , "eliot" );
+ ThreadSafeString s2;
+ s2 = s.toString().c_str();
+ ASSERT_EQUALS(s2.toString(), "eliot");
+ {
+ string foo;
{
- string foo;
- {
- ThreadSafeString bar;
- bar = "eliot2";
- foo = bar.toString();
- }
- ASSERT_EQUALS( "eliot2" , foo );
+ ThreadSafeString bar;
+ bar = "eliot2";
+ foo = bar.toString();
}
+ ASSERT_EQUALS("eliot2", foo);
}
- };
-
- class PtrTests {
- public:
- void run() {
- scoped_ptr<int> p1 (new int(1));
- boost::shared_ptr<int> p2 (new int(2));
- scoped_ptr<const int> p3 (new int(3));
- boost::shared_ptr<const int> p4 (new int(4));
-
- //non-const
- ASSERT_EQUALS( p1.get() , ptr<int>(p1) );
- ASSERT_EQUALS( p2.get() , ptr<int>(p2) );
- ASSERT_EQUALS( p2.get() , ptr<int>(p2.get()) ); // T* constructor
- ASSERT_EQUALS( p2.get() , ptr<int>(ptr<int>(p2)) ); // copy constructor
- ASSERT_EQUALS( *p2 , *ptr<int>(p2));
- ASSERT_EQUALS( p2.get() , ptr<boost::shared_ptr<int> >(&p2)->get() ); // operator->
-
- //const
- ASSERT_EQUALS( p1.get() , ptr<const int>(p1) );
- ASSERT_EQUALS( p2.get() , ptr<const int>(p2) );
- ASSERT_EQUALS( p2.get() , ptr<const int>(p2.get()) );
- ASSERT_EQUALS( p3.get() , ptr<const int>(p3) );
- ASSERT_EQUALS( p4.get() , ptr<const int>(p4) );
- ASSERT_EQUALS( p4.get() , ptr<const int>(p4.get()) );
- ASSERT_EQUALS( p2.get() , ptr<const int>(ptr<const int>(p2)) );
- ASSERT_EQUALS( p2.get() , ptr<const int>(ptr<int>(p2)) ); // constizing copy constructor
- ASSERT_EQUALS( *p2 , *ptr<int>(p2));
- ASSERT_EQUALS( p2.get() , ptr<const boost::shared_ptr<int> >(&p2)->get() );
-
- //bool context
- ASSERT( ptr<int>(p1) );
- ASSERT( !ptr<int>(NULL) );
- ASSERT( !ptr<int>() );
+ }
+};
+
+class PtrTests {
+public:
+ void run() {
+ scoped_ptr<int> p1(new int(1));
+ boost::shared_ptr<int> p2(new int(2));
+ scoped_ptr<const int> p3(new int(3));
+ boost::shared_ptr<const int> p4(new int(4));
+
+ // non-const
+ ASSERT_EQUALS(p1.get(), ptr<int>(p1));
+ ASSERT_EQUALS(p2.get(), ptr<int>(p2));
+ ASSERT_EQUALS(p2.get(), ptr<int>(p2.get())); // T* constructor
+ ASSERT_EQUALS(p2.get(), ptr<int>(ptr<int>(p2))); // copy constructor
+ ASSERT_EQUALS(*p2, *ptr<int>(p2));
+ ASSERT_EQUALS(p2.get(), ptr<boost::shared_ptr<int>>(&p2)->get()); // operator->
+
+ // const
+ ASSERT_EQUALS(p1.get(), ptr<const int>(p1));
+ ASSERT_EQUALS(p2.get(), ptr<const int>(p2));
+ ASSERT_EQUALS(p2.get(), ptr<const int>(p2.get()));
+ ASSERT_EQUALS(p3.get(), ptr<const int>(p3));
+ ASSERT_EQUALS(p4.get(), ptr<const int>(p4));
+ ASSERT_EQUALS(p4.get(), ptr<const int>(p4.get()));
+ ASSERT_EQUALS(p2.get(), ptr<const int>(ptr<const int>(p2)));
+ ASSERT_EQUALS(p2.get(), ptr<const int>(ptr<int>(p2))); // constizing copy constructor
+ ASSERT_EQUALS(*p2, *ptr<int>(p2));
+ ASSERT_EQUALS(p2.get(), ptr<const boost::shared_ptr<int>>(&p2)->get());
+
+ // bool context
+ ASSERT(ptr<int>(p1));
+ ASSERT(!ptr<int>(NULL));
+ ASSERT(!ptr<int>());
#if 0
// These shouldn't compile
@@ -403,173 +401,164 @@ namespace BasicTests {
ASSERT_EQUALS( p4.get() , ptr<int>(p4) );
ASSERT_EQUALS( p2.get() , ptr<int>(ptr<const int>(p2)) );
#endif
- }
- };
-
- struct StringSplitterTest {
-
- void test( string s ) {
- vector<string> v = StringSplitter::split( s , "," );
- ASSERT_EQUALS( s , StringSplitter::join( v , "," ) );
- }
-
- void run() {
- test( "a" );
- test( "a,b" );
- test( "a,b,c" );
-
- vector<string> x = StringSplitter::split( "axbxc" , "x" );
- ASSERT_EQUALS( 3 , (int)x.size() );
- ASSERT_EQUALS( "a" , x[0] );
- ASSERT_EQUALS( "b" , x[1] );
- ASSERT_EQUALS( "c" , x[2] );
+ }
+};
- x = StringSplitter::split( "axxbxxc" , "xx" );
- ASSERT_EQUALS( 3 , (int)x.size() );
- ASSERT_EQUALS( "a" , x[0] );
- ASSERT_EQUALS( "b" , x[1] );
- ASSERT_EQUALS( "c" , x[2] );
+struct StringSplitterTest {
+ void test(string s) {
+ vector<string> v = StringSplitter::split(s, ",");
+ ASSERT_EQUALS(s, StringSplitter::join(v, ","));
+ }
- }
- };
+ void run() {
+ test("a");
+ test("a,b");
+ test("a,b,c");
+
+ vector<string> x = StringSplitter::split("axbxc", "x");
+ ASSERT_EQUALS(3, (int)x.size());
+ ASSERT_EQUALS("a", x[0]);
+ ASSERT_EQUALS("b", x[1]);
+ ASSERT_EQUALS("c", x[2]);
+
+ x = StringSplitter::split("axxbxxc", "xx");
+ ASSERT_EQUALS(3, (int)x.size());
+ ASSERT_EQUALS("a", x[0]);
+ ASSERT_EQUALS("b", x[1]);
+ ASSERT_EQUALS("c", x[2]);
+ }
+};
- struct IsValidUTF8Test {
+struct IsValidUTF8Test {
// macros used to get valid line numbers
-#define good(s) ASSERT(isValidUTF8(s));
-#define bad(s) ASSERT(!isValidUTF8(s));
-
- void run() {
- good("A");
- good("\xC2\xA2"); // cent: ¢
- good("\xE2\x82\xAC"); // euro: €
- good("\xF0\x9D\x90\x80"); // Blackboard A: 𝐀
-
- //abrupt end
- bad("\xC2");
- bad("\xE2\x82");
- bad("\xF0\x9D\x90");
- bad("\xC2 ");
- bad("\xE2\x82 ");
- bad("\xF0\x9D\x90 ");
-
- //too long
- bad("\xF8\x80\x80\x80\x80");
- bad("\xFC\x80\x80\x80\x80\x80");
- bad("\xFE\x80\x80\x80\x80\x80\x80");
- bad("\xFF\x80\x80\x80\x80\x80\x80\x80");
-
- bad("\xF5\x80\x80\x80"); // U+140000 > U+10FFFF
- bad("\x80"); //cant start with continuation byte
- bad("\xC0\x80"); // 2-byte version of ASCII NUL
+#define good(s) ASSERT(isValidUTF8(s));
+#define bad(s) ASSERT(!isValidUTF8(s));
+
+ void run() {
+ good("A");
+ good("\xC2\xA2"); // cent: ¢
+ good("\xE2\x82\xAC"); // euro: €
+ good("\xF0\x9D\x90\x80"); // Blackboard A: 𝐀
+
+ // abrupt end
+ bad("\xC2");
+ bad("\xE2\x82");
+ bad("\xF0\x9D\x90");
+ bad("\xC2 ");
+ bad("\xE2\x82 ");
+ bad("\xF0\x9D\x90 ");
+
+ // too long
+ bad("\xF8\x80\x80\x80\x80");
+ bad("\xFC\x80\x80\x80\x80\x80");
+ bad("\xFE\x80\x80\x80\x80\x80\x80");
+ bad("\xFF\x80\x80\x80\x80\x80\x80\x80");
+
+ bad("\xF5\x80\x80\x80"); // U+140000 > U+10FFFF
+ bad("\x80"); // cant start with continuation byte
+ bad("\xC0\x80"); // 2-byte version of ASCII NUL
#undef good
#undef bad
- }
- };
-
-
- class QueueTest {
- public:
- void run() {
- BlockingQueue<int> q;
- Timer t;
- int x;
- ASSERT( ! q.blockingPop( x , 5 ) );
- ASSERT( t.seconds() > 3 && t.seconds() < 9 );
-
- }
- };
-
- class StrTests {
- public:
-
- void run() {
- ASSERT_EQUALS( 1u , str::count( "abc" , 'b' ) );
- ASSERT_EQUALS( 3u , str::count( "babab" , 'b' ) );
- }
-
- };
-
- class HostAndPortTests {
- public:
- void run() {
- HostAndPort a( "x1" , 1000 );
- HostAndPort b( "x1" , 1000 );
- HostAndPort c( "x1" , 1001 );
- HostAndPort d( "x2" , 1000 );
-
- ASSERT( a == b );
- ASSERT( a != c );
- ASSERT( a != d );
+ }
+};
- }
- };
- class RelativePathTest {
- public:
- void run() {
- RelativePath a = RelativePath::fromRelativePath( "a" );
- RelativePath b = RelativePath::fromRelativePath( "a" );
- RelativePath c = RelativePath::fromRelativePath( "b" );
- RelativePath d = RelativePath::fromRelativePath( "a/b" );
+class QueueTest {
+public:
+ void run() {
+ BlockingQueue<int> q;
+ Timer t;
+ int x;
+ ASSERT(!q.blockingPop(x, 5));
+ ASSERT(t.seconds() > 3 && t.seconds() < 9);
+ }
+};
+class StrTests {
+public:
+ void run() {
+ ASSERT_EQUALS(1u, str::count("abc", 'b'));
+ ASSERT_EQUALS(3u, str::count("babab", 'b'));
+ }
+};
+
+class HostAndPortTests {
+public:
+ void run() {
+ HostAndPort a("x1", 1000);
+ HostAndPort b("x1", 1000);
+ HostAndPort c("x1", 1001);
+ HostAndPort d("x2", 1000);
+
+ ASSERT(a == b);
+ ASSERT(a != c);
+ ASSERT(a != d);
+ }
+};
- ASSERT( a == b );
- ASSERT( a != c );
- ASSERT( a != d );
- ASSERT( c != d );
- }
- };
-
- struct CompressionTest1 {
- void run() {
- const char * c = "this is a test";
- std::string s;
- size_t len = compress(c, strlen(c)+1, &s);
- verify( len > 0 );
-
- std::string out;
- bool ok = uncompress(s.c_str(), s.size(), &out);
- verify(ok);
- verify( strcmp(out.c_str(), c) == 0 );
- }
- } ctest1;
+class RelativePathTest {
+public:
+ void run() {
+ RelativePath a = RelativePath::fromRelativePath("a");
+ RelativePath b = RelativePath::fromRelativePath("a");
+ RelativePath c = RelativePath::fromRelativePath("b");
+ RelativePath d = RelativePath::fromRelativePath("a/b");
- class All : public Suite {
- public:
- All() : Suite( "basic" ) {
- }
- void setupTests() {
- add< Rarely >();
- add< Base64Tests >();
+ ASSERT(a == b);
+ ASSERT(a != c);
+ ASSERT(a != d);
+ ASSERT(c != d);
+ }
+};
+
+struct CompressionTest1 {
+ void run() {
+ const char* c = "this is a test";
+ std::string s;
+ size_t len = compress(c, strlen(c) + 1, &s);
+ verify(len > 0);
+
+ std::string out;
+ bool ok = uncompress(s.c_str(), s.size(), &out);
+ verify(ok);
+ verify(strcmp(out.c_str(), c) == 0);
+ }
+} ctest1;
- add< stringbuildertests::simple1 >();
- add< stringbuildertests::simple2 >();
- add< stringbuildertests::reset1 >();
- add< stringbuildertests::reset2 >();
+class All : public Suite {
+public:
+ All() : Suite("basic") {}
- add< sleeptest >();
- add< SleepBackoffTest >();
- add< AssertTests >();
+ void setupTests() {
+ add<Rarely>();
+ add<Base64Tests>();
- add< PtrTests >();
+ add<stringbuildertests::simple1>();
+ add<stringbuildertests::simple2>();
+ add<stringbuildertests::reset1>();
+ add<stringbuildertests::reset2>();
- add< StringSplitterTest >();
- add< IsValidUTF8Test >();
+ add<sleeptest>();
+ add<SleepBackoffTest>();
+ add<AssertTests>();
- add< QueueTest >();
+ add<PtrTests>();
- add< StrTests >();
+ add<StringSplitterTest>();
+ add<IsValidUTF8Test>();
- add< HostAndPortTests >();
- add< RelativePathTest >();
+ add<QueueTest>();
- add< CompressionTest1 >();
+ add<StrTests>();
- }
- };
+ add<HostAndPortTests>();
+ add<RelativePathTest>();
- SuiteInstance<All> myall;
+ add<CompressionTest1>();
+ }
+};
-} // namespace BasicTests
+SuiteInstance<All> myall;
+} // namespace BasicTests
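
SleepBackoffTest above pins down three properties of Backoff::getNextSleepMillis: each sleep doubles the previous one, the result is capped at the configured maximum, and waiting much longer than that maximum resets the sequence. A hedged sketch of that contract, collapsing the (current, last-error) timestamp pair into a single elapsed-wait value (SimpleBackoff is illustrative, not mongo::Backoff):

#include <algorithm>
#include <cassert>

struct SimpleBackoff {
    int maxSleepMillis;                   // cap on any single sleep
    unsigned long long resetAfterMillis;  // waiting longer than this restarts the sequence

    int next(int lastSleepMillis, unsigned long long waitedMillis) const {
        if (waitedMillis > resetAfterMillis)
            return 1;  // reset expected
        int doubled = lastSleepMillis > 0 ? lastSleepMillis * 2 : 1;
        return std::min(doubled, maxSleepMillis);  // never exceed the cap
    }
};

int main() {
    SimpleBackoff backoff{1000, 1000 + 2000};  // mirrors the test's constructor arguments
    assert(backoff.next(0, 0) == 1);           // first sleep
    assert(backoff.next(2, 0) == 4);           // double the previous sleep
    assert(backoff.next(256, 0) == 512);
    assert(backoff.next(800, 0) == 1000);      // capped at the maximum
    assert(backoff.next(20, 3000) == 40);      // at the threshold: no reset yet
    assert(backoff.next(20, 3001) == 1);       // past the threshold: reset
    return 0;
}
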
diff --git a/src/mongo/dbtests/chunktests.cpp b/src/mongo/dbtests/chunktests.cpp
index 0f44ff7a856..22242cd6ce2 100644
--- a/src/mongo/dbtests/chunktests.cpp
+++ b/src/mongo/dbtests/chunktests.cpp
@@ -36,249 +36,326 @@
namespace mongo {
- using std::set;
- using std::string;
- using std::vector;
+using std::set;
+using std::string;
+using std::vector;
- class TestableChunkManager : public ChunkManager {
- public:
+class TestableChunkManager : public ChunkManager {
+public:
+ TestableChunkManager(const string& ns, const ShardKeyPattern& keyPattern, bool unique)
+ : ChunkManager(ns, keyPattern, unique) {}
- TestableChunkManager(const string& ns, const ShardKeyPattern& keyPattern, bool unique)
- : ChunkManager(ns, keyPattern, unique) {
- }
+ void setSingleChunkForShards(const vector<BSONObj>& splitPoints) {
+ ChunkMap& chunkMap = const_cast<ChunkMap&>(_chunkMap);
+ ChunkRangeManager& chunkRanges = const_cast<ChunkRangeManager&>(_chunkRanges);
+ set<Shard>& shards = const_cast<set<Shard>&>(_shards);
+
+ vector<BSONObj> mySplitPoints(splitPoints);
+ mySplitPoints.insert(mySplitPoints.begin(), _keyPattern.getKeyPattern().globalMin());
+ mySplitPoints.push_back(_keyPattern.getKeyPattern().globalMax());
+
+ for (unsigned i = 1; i < mySplitPoints.size(); ++i) {
+ string name = str::stream() << (i - 1);
+ Shard shard(name, name, 0 /* maxSize */, false /* draining */);
+ shards.insert(shard);
- void setSingleChunkForShards( const vector<BSONObj> &splitPoints ) {
- ChunkMap &chunkMap = const_cast<ChunkMap&>( _chunkMap );
- ChunkRangeManager &chunkRanges = const_cast<ChunkRangeManager&>( _chunkRanges );
- set<Shard> &shards = const_cast<set<Shard>&>( _shards );
-
- vector<BSONObj> mySplitPoints( splitPoints );
- mySplitPoints.insert( mySplitPoints.begin(), _keyPattern.getKeyPattern().globalMin() );
- mySplitPoints.push_back( _keyPattern.getKeyPattern().globalMax() );
-
- for( unsigned i = 1; i < mySplitPoints.size(); ++i ) {
- string name = str::stream() << (i-1);
- Shard shard(name,
- name,
- 0 /* maxSize */,
- false /* draining */);
- shards.insert( shard );
-
- ChunkPtr chunk( new Chunk( this, mySplitPoints[ i-1 ], mySplitPoints[ i ],
- shard ) );
- chunkMap[ mySplitPoints[ i ] ] = chunk;
- }
-
- chunkRanges.reloadAll( chunkMap );
+ ChunkPtr chunk(new Chunk(this, mySplitPoints[i - 1], mySplitPoints[i], shard));
+ chunkMap[mySplitPoints[i]] = chunk;
}
- };
-
-} // namespace mongo
+
+ chunkRanges.reloadAll(chunkMap);
+ }
+};
+
+} // namespace mongo
namespace ChunkTests {
- namespace ChunkManagerTests {
-
- typedef mongo::TestableChunkManager ChunkManager;
-
- class Base {
- public:
- virtual ~Base() {}
- void run() {
- ShardKeyPattern shardKeyPattern(shardKey());
- ChunkManager chunkManager("", shardKeyPattern, false);
- chunkManager.setSingleChunkForShards( splitPointsVector() );
-
- set<Shard> shards;
- chunkManager.getShardsForQuery( shards, query() );
-
- BSONArrayBuilder b;
- for( set<Shard>::const_iterator i = shards.begin(); i != shards.end(); ++i ) {
- b << i->getName();
- }
- ASSERT_EQUALS( expectedShardNames(), b.arr() );
- }
- protected:
- virtual BSONObj shardKey() const { return BSON( "a" << 1 ); }
- virtual BSONArray splitPoints() const { return BSONArray(); }
- virtual BSONObj query() const { return BSONObj(); }
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "0" ); }
- virtual vector<BSONObj> splitPointsVector() const {
- vector<BSONObj> ret;
- BSONArray a = splitPoints();
- BSONObjIterator i( a );
- while( i.more() ) {
- ret.push_back( i.next().Obj().getOwned() );
- }
- return ret;
- }
- };
-
- class EmptyQuerySingleShard : public Base {
- };
-
- class MultiShardBase : public Base {
- virtual BSONArray splitPoints() const {
- return BSON_ARRAY( BSON( "a" << "x" ) << BSON( "a" << "y" ) << BSON( "a" << "z" ) );
- }
- };
-
- class EmptyQueryMultiShard : public MultiShardBase {
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY( "0" << "1" << "2" << "3" );
- }
- };
-
- class UniversalRangeMultiShard : public EmptyQueryMultiShard {
- virtual BSONObj query() const { return BSON( "b" << 1 ); }
- };
-
- class EqualityRangeSingleShard : public EmptyQuerySingleShard {
- virtual BSONObj query() const { return BSON( "a" << "x" ); }
- };
-
- class EqualityRangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const { return BSON( "a" << "y" ); }
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "2" ); }
- };
-
- class SetRangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const { return fromjson( "{a:{$in:['u','y']}}" ); }
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "0" << "2" ); }
- };
-
- class GTRangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const { return BSON( "a" << GT << "x" ); }
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "1" << "2" << "3" ); }
- };
-
- class GTERangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const { return BSON( "a" << GTE << "x" ); }
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "1" << "2" << "3" ); }
- };
-
- class LTRangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const { return BSON( "a" << LT << "y" ); }
- /**
- * It isn't actually necessary to return shard 2 because its lowest key is "y", which
- * is excluded from the query. SERVER-4791
- */
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "0" << "1" << "2" ); }
- };
-
- class LTERangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const { return BSON( "a" << LTE << "y" ); }
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "0" << "1" << "2" ); }
- };
-
- class OrEqualities : public MultiShardBase {
- virtual BSONObj query() const { return fromjson( "{$or:[{a:'u'},{a:'y'}]}" ); }
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "0" << "2" ); }
- };
-
- class OrEqualityInequality : public MultiShardBase {
- virtual BSONObj query() const { return fromjson( "{$or:[{a:'u'},{a:{$gte:'y'}}]}" ); }
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "0" << "2" << "3" ); }
- };
-
- class OrEqualityInequalityUnhelpful : public MultiShardBase {
- virtual BSONObj query() const {
- return fromjson( "{$or:[{a:'u'},{a:{$gte:'zz'}},{}]}" );
- }
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY( "0" << "1" << "2" << "3" );
- }
- };
-
- template<class BASE>
- class Unsatisfiable : public BASE {
- /**
- * SERVER-4914 For now the first shard is returned for unsatisfiable queries, as some
- * clients of getShardsForQuery() expect at least one shard.
- */
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY( "0" ) /* BSONArray() */;
- }
- };
-
- class UnsatisfiableRangeSingleShard : public Unsatisfiable<Base> {
- virtual BSONObj query() const { return BSON( "a" << GT << "x" << LT << "x" ); }
- };
-
- class UnsatisfiableRangeMultiShard : public Unsatisfiable<MultiShardBase> {
- virtual BSONObj query() const { return BSON( "a" << GT << "x" << LT << "x" ); }
- };
-
- class EqualityThenUnsatisfiable : public Unsatisfiable<Base> {
- virtual BSONObj shardKey() const { return BSON( "a" << 1 << "b" << 1 ); }
- virtual BSONObj query() const { return BSON( "a" << 1 << "b" << GT << 4 << LT << 4 ); }
- };
-
- class InequalityThenUnsatisfiable : public Unsatisfiable<Base> {
- virtual BSONObj shardKey() const { return BSON( "a" << 1 << "b" << 1 ); }
- virtual BSONObj query() const {
- return BSON( "a" << GT << 1 << "b" << GT << 4 << LT << 4 );
- }
- };
-
- class OrEqualityUnsatisfiableInequality : public MultiShardBase {
- virtual BSONObj query() const {
- return fromjson( "{$or:[{a:'x'},{a:{$gt:'u',$lt:'u'}},{a:{$gte:'y'}}]}" );
- }
- virtual BSONArray expectedShardNames() const { return BSON_ARRAY( "1" << "2" << "3" ); }
- };
-
- class CompoundKeyBase : public Base {
- virtual BSONObj shardKey() const {
- return BSON( "a" << 1 << "b" << 1 );
- }
- virtual BSONArray splitPoints() const {
- return BSON_ARRAY( BSON( "a" << 5 << "b" << 10 ) << BSON ( "a" << 5 << "b" << 20 ) );
- }
- };
-
- class InMultiShard : public CompoundKeyBase {
- virtual BSONObj query() const {
- return BSON( "a" << BSON( "$in" << BSON_ARRAY( 0 << 5 << 10 ) ) <<
- "b" << BSON( "$in" << BSON_ARRAY( 0 << 5 << 25 ) ) );
- }
- // If we were to send this query to just the shards it actually needed to hit, it would only hit shards 0 and 2
- // Because of the optimization from SERVER-4745, however, we'll also hit shard 1.
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY( "0" << "1" << "2" );
- }
- };
-
- } // namespace ChunkManagerTests
-
- class All : public Suite {
- public:
- All() : Suite( "chunk" ) {
+namespace ChunkManagerTests {
+
+typedef mongo::TestableChunkManager ChunkManager;
+
+class Base {
+public:
+ virtual ~Base() {}
+ void run() {
+ ShardKeyPattern shardKeyPattern(shardKey());
+ ChunkManager chunkManager("", shardKeyPattern, false);
+ chunkManager.setSingleChunkForShards(splitPointsVector());
+
+ set<Shard> shards;
+ chunkManager.getShardsForQuery(shards, query());
+
+ BSONArrayBuilder b;
+ for (set<Shard>::const_iterator i = shards.begin(); i != shards.end(); ++i) {
+ b << i->getName();
}
-
- void setupTests() {
- add<ChunkManagerTests::EmptyQuerySingleShard>();
- add<ChunkManagerTests::EmptyQueryMultiShard>();
- add<ChunkManagerTests::UniversalRangeMultiShard>();
- add<ChunkManagerTests::EqualityRangeSingleShard>();
- add<ChunkManagerTests::EqualityRangeMultiShard>();
- add<ChunkManagerTests::SetRangeMultiShard>();
- add<ChunkManagerTests::GTRangeMultiShard>();
- add<ChunkManagerTests::GTERangeMultiShard>();
- add<ChunkManagerTests::LTRangeMultiShard>();
- add<ChunkManagerTests::LTERangeMultiShard>();
- add<ChunkManagerTests::OrEqualities>();
- add<ChunkManagerTests::OrEqualityInequality>();
- add<ChunkManagerTests::OrEqualityInequalityUnhelpful>();
- add<ChunkManagerTests::UnsatisfiableRangeSingleShard>();
- add<ChunkManagerTests::UnsatisfiableRangeMultiShard>();
- add<ChunkManagerTests::EqualityThenUnsatisfiable>();
- add<ChunkManagerTests::InequalityThenUnsatisfiable>();
- add<ChunkManagerTests::OrEqualityUnsatisfiableInequality>();
- add<ChunkManagerTests::InMultiShard>();
+ ASSERT_EQUALS(expectedShardNames(), b.arr());
+ }
+
+protected:
+ virtual BSONObj shardKey() const {
+ return BSON("a" << 1);
+ }
+ virtual BSONArray splitPoints() const {
+ return BSONArray();
+ }
+ virtual BSONObj query() const {
+ return BSONObj();
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0");
+ }
+ virtual vector<BSONObj> splitPointsVector() const {
+ vector<BSONObj> ret;
+ BSONArray a = splitPoints();
+ BSONObjIterator i(a);
+ while (i.more()) {
+ ret.push_back(i.next().Obj().getOwned());
}
- };
+ return ret;
+ }
+};
+
+class EmptyQuerySingleShard : public Base {};
+
+class MultiShardBase : public Base {
+ virtual BSONArray splitPoints() const {
+ return BSON_ARRAY(BSON("a"
+ << "x")
+ << BSON("a"
+ << "y") << BSON("a"
+ << "z"));
+ }
+};
+
+class EmptyQueryMultiShard : public MultiShardBase {
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0"
+ << "1"
+ << "2"
+ << "3");
+ }
+};
+
+class UniversalRangeMultiShard : public EmptyQueryMultiShard {
+ virtual BSONObj query() const {
+ return BSON("b" << 1);
+ }
+};
+
+class EqualityRangeSingleShard : public EmptyQuerySingleShard {
+ virtual BSONObj query() const {
+ return BSON("a"
+ << "x");
+ }
+};
+
+class EqualityRangeMultiShard : public MultiShardBase {
+ virtual BSONObj query() const {
+ return BSON("a"
+ << "y");
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("2");
+ }
+};
+
+class SetRangeMultiShard : public MultiShardBase {
+ virtual BSONObj query() const {
+ return fromjson("{a:{$in:['u','y']}}");
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0"
+ << "2");
+ }
+};
+
+class GTRangeMultiShard : public MultiShardBase {
+ virtual BSONObj query() const {
+ return BSON("a" << GT << "x");
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("1"
+ << "2"
+ << "3");
+ }
+};
+
+class GTERangeMultiShard : public MultiShardBase {
+ virtual BSONObj query() const {
+ return BSON("a" << GTE << "x");
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("1"
+ << "2"
+ << "3");
+ }
+};
+
+class LTRangeMultiShard : public MultiShardBase {
+ virtual BSONObj query() const {
+ return BSON("a" << LT << "y");
+ }
+ /**
+ * It isn't actually necessary to return shard 2 because its lowest key is "y", which
+ * is excluded from the query. SERVER-4791
+ */
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0"
+ << "1"
+ << "2");
+ }
+};
+
+class LTERangeMultiShard : public MultiShardBase {
+ virtual BSONObj query() const {
+ return BSON("a" << LTE << "y");
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0"
+ << "1"
+ << "2");
+ }
+};
+
+class OrEqualities : public MultiShardBase {
+ virtual BSONObj query() const {
+ return fromjson("{$or:[{a:'u'},{a:'y'}]}");
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0"
+ << "2");
+ }
+};
+
+class OrEqualityInequality : public MultiShardBase {
+ virtual BSONObj query() const {
+ return fromjson("{$or:[{a:'u'},{a:{$gte:'y'}}]}");
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0"
+ << "2"
+ << "3");
+ }
+};
+
+class OrEqualityInequalityUnhelpful : public MultiShardBase {
+ virtual BSONObj query() const {
+ return fromjson("{$or:[{a:'u'},{a:{$gte:'zz'}},{}]}");
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0"
+ << "1"
+ << "2"
+ << "3");
+ }
+};
+
+template <class BASE>
+class Unsatisfiable : public BASE {
+ /**
+ * SERVER-4914 For now the first shard is returned for unsatisfiable queries, as some
+ * clients of getShardsForQuery() expect at least one shard.
+ */
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0") /* BSONArray() */;
+ }
+};
+
+class UnsatisfiableRangeSingleShard : public Unsatisfiable<Base> {
+ virtual BSONObj query() const {
+ return BSON("a" << GT << "x" << LT << "x");
+ }
+};
+
+class UnsatisfiableRangeMultiShard : public Unsatisfiable<MultiShardBase> {
+ virtual BSONObj query() const {
+ return BSON("a" << GT << "x" << LT << "x");
+ }
+};
+
+class EqualityThenUnsatisfiable : public Unsatisfiable<Base> {
+ virtual BSONObj shardKey() const {
+ return BSON("a" << 1 << "b" << 1);
+ }
+ virtual BSONObj query() const {
+ return BSON("a" << 1 << "b" << GT << 4 << LT << 4);
+ }
+};
+
+class InequalityThenUnsatisfiable : public Unsatisfiable<Base> {
+ virtual BSONObj shardKey() const {
+ return BSON("a" << 1 << "b" << 1);
+ }
+ virtual BSONObj query() const {
+ return BSON("a" << GT << 1 << "b" << GT << 4 << LT << 4);
+ }
+};
+
+class OrEqualityUnsatisfiableInequality : public MultiShardBase {
+ virtual BSONObj query() const {
+ return fromjson("{$or:[{a:'x'},{a:{$gt:'u',$lt:'u'}},{a:{$gte:'y'}}]}");
+ }
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("1"
+ << "2"
+ << "3");
+ }
+};
+
+class CompoundKeyBase : public Base {
+ virtual BSONObj shardKey() const {
+ return BSON("a" << 1 << "b" << 1);
+ }
+ virtual BSONArray splitPoints() const {
+ return BSON_ARRAY(BSON("a" << 5 << "b" << 10) << BSON("a" << 5 << "b" << 20));
+ }
+};
+
+class InMultiShard : public CompoundKeyBase {
+ virtual BSONObj query() const {
+ return BSON("a" << BSON("$in" << BSON_ARRAY(0 << 5 << 10)) << "b"
+ << BSON("$in" << BSON_ARRAY(0 << 5 << 25)));
+ }
+    // If we were to send this query to just the shards it actually needed to hit, it would only hit shards 0 and 2.
+    // Because of the optimization from SERVER-4745, however, we'll also hit shard 1.
+ virtual BSONArray expectedShardNames() const {
+ return BSON_ARRAY("0"
+ << "1"
+ << "2");
+ }
+};
+
+} // namespace ChunkManagerTests
+
+class All : public Suite {
+public:
+ All() : Suite("chunk") {}
+
+ void setupTests() {
+ add<ChunkManagerTests::EmptyQuerySingleShard>();
+ add<ChunkManagerTests::EmptyQueryMultiShard>();
+ add<ChunkManagerTests::UniversalRangeMultiShard>();
+ add<ChunkManagerTests::EqualityRangeSingleShard>();
+ add<ChunkManagerTests::EqualityRangeMultiShard>();
+ add<ChunkManagerTests::SetRangeMultiShard>();
+ add<ChunkManagerTests::GTRangeMultiShard>();
+ add<ChunkManagerTests::GTERangeMultiShard>();
+ add<ChunkManagerTests::LTRangeMultiShard>();
+ add<ChunkManagerTests::LTERangeMultiShard>();
+ add<ChunkManagerTests::OrEqualities>();
+ add<ChunkManagerTests::OrEqualityInequality>();
+ add<ChunkManagerTests::OrEqualityInequalityUnhelpful>();
+ add<ChunkManagerTests::UnsatisfiableRangeSingleShard>();
+ add<ChunkManagerTests::UnsatisfiableRangeMultiShard>();
+ add<ChunkManagerTests::EqualityThenUnsatisfiable>();
+ add<ChunkManagerTests::InequalityThenUnsatisfiable>();
+ add<ChunkManagerTests::OrEqualityUnsatisfiableInequality>();
+ add<ChunkManagerTests::InMultiShard>();
+ }
+};
- SuiteInstance<All> myAll;
+SuiteInstance<All> myAll;
-} // namespace ChunkTests
+} // namespace ChunkTests
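
The shard names these chunk tests expect follow directly from how setSingleChunkForShards lays out ranges: n split points yield n + 1 chunks, where chunk i covers the half-open range [split[i-1], split[i]) with globalMin and globalMax at the ends, and is owned by a shard named after i. For a point query, the owning shard index is therefore just the number of split points less than or equal to the key. A small sketch of that mapping (shardForKey is an illustrative helper, not part of ChunkManager):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Index of the shard owning `key`, given ascending split points.
int shardForKey(const std::vector<std::string>& splits, const std::string& key) {
    return static_cast<int>(std::upper_bound(splits.begin(), splits.end(), key) - splits.begin());
}

int main() {
    // The split points used by MultiShardBase above.
    const std::vector<std::string> splits = {"x", "y", "z"};

    assert(shardForKey(splits, "u") == 0);   // SetRangeMultiShard routes 'u' to shard "0"
    assert(shardForKey(splits, "y") == 2);   // EqualityRangeMultiShard expects shard "2"
    assert(shardForKey(splits, "zz") == 3);  // keys past the last split land on shard "3"
    return 0;
}
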
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index efd31f5e580..fcef64d3209 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -38,204 +38,205 @@
namespace ClientTests {
- using std::auto_ptr;
- using std::string;
- using std::vector;
+using std::auto_ptr;
+using std::string;
+using std::vector;
- class Base {
- public:
+class Base {
+public:
+ Base(string coll) : _ns("test." + coll) {
+ OperationContextImpl txn;
+ DBDirectClient db(&txn);
- Base( string coll ) : _ns("test." + coll) {
- OperationContextImpl txn;
- DBDirectClient db(&txn);
+ db.dropDatabase("test");
+ }
- db.dropDatabase("test");
- }
-
- virtual ~Base() {
- OperationContextImpl txn;
- DBDirectClient db(&txn);
+ virtual ~Base() {
+ OperationContextImpl txn;
+ DBDirectClient db(&txn);
- db.dropCollection( _ns );
- }
+ db.dropCollection(_ns);
+ }
- const char * ns() { return _ns.c_str(); }
+ const char* ns() {
+ return _ns.c_str();
+ }
- const string _ns;
- };
+ const string _ns;
+};
- class DropIndex : public Base {
- public:
- DropIndex() : Base( "dropindex" ) {}
- void run() {
- OperationContextImpl txn;
- DBDirectClient db(&txn);
+class DropIndex : public Base {
+public:
+ DropIndex() : Base("dropindex") {}
+ void run() {
+ OperationContextImpl txn;
+ DBDirectClient db(&txn);
- db.insert( ns() , BSON( "x" << 2 ) );
- ASSERT_EQUALS( 1u , db.getIndexSpecs(ns()).size() );
+ db.insert(ns(), BSON("x" << 2));
+ ASSERT_EQUALS(1u, db.getIndexSpecs(ns()).size());
- ASSERT_OK(dbtests::createIndex( &txn, ns(), BSON( "x" << 1 ) ));
- ASSERT_EQUALS( 2u , db.getIndexSpecs(ns()).size() );
+ ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1)));
+ ASSERT_EQUALS(2u, db.getIndexSpecs(ns()).size());
- db.dropIndex( ns() , BSON( "x" << 1 ) );
- ASSERT_EQUALS( 1u , db.getIndexSpecs(ns()).size() );
+ db.dropIndex(ns(), BSON("x" << 1));
+ ASSERT_EQUALS(1u, db.getIndexSpecs(ns()).size());
- ASSERT_OK(dbtests::createIndex( &txn, ns(), BSON( "x" << 1 ) ));
- ASSERT_EQUALS( 2u , db.getIndexSpecs(ns()).size() );
-
- db.dropIndexes( ns() );
- ASSERT_EQUALS( 1u , db.getIndexSpecs(ns()).size() );
- }
- };
+ ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1)));
+ ASSERT_EQUALS(2u, db.getIndexSpecs(ns()).size());
- /**
- * Check that nIndexes is incremented correctly when an index builds (and that it is not
- * incremented when an index fails to build), system.indexes has an entry added (or not), and
- * system.namespaces has a doc added (or not).
- */
- class BuildIndex : public Base {
- public:
- BuildIndex() : Base("buildIndex") {}
- void run() {
- OperationContextImpl txn;
+ db.dropIndexes(ns());
+ ASSERT_EQUALS(1u, db.getIndexSpecs(ns()).size());
+ }
+};
- Client::WriteContext ctx(&txn, ns());
- DBDirectClient db(&txn);
-
- db.insert(ns(), BSON("x" << 1 << "y" << 2));
- db.insert(ns(), BSON("x" << 2 << "y" << 2));
+/**
+ * Check that nIndexes is incremented correctly when an index builds (and that it is not
+ * incremented when an index fails to build), system.indexes has an entry added (or not), and
+ * system.namespaces has a doc added (or not).
+ */
+class BuildIndex : public Base {
+public:
+ BuildIndex() : Base("buildIndex") {}
+ void run() {
+ OperationContextImpl txn;
- Collection* collection = ctx.getCollection();
- ASSERT( collection );
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
+ Client::WriteContext ctx(&txn, ns());
+ DBDirectClient db(&txn);
- ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&txn));
- // _id index
- ASSERT_EQUALS(1U, db.getIndexSpecs(ns()).size());
+ db.insert(ns(), BSON("x" << 1 << "y" << 2));
+ db.insert(ns(), BSON("x" << 2 << "y" << 2));
- ASSERT_EQUALS(ErrorCodes::DuplicateKey,
- dbtests::createIndex(&txn, ns(), BSON("y" << 1), true));
+ Collection* collection = ctx.getCollection();
+ ASSERT(collection);
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
- ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&txn));
- ASSERT_EQUALS(1U, db.getIndexSpecs(ns()).size());
+ ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&txn));
+ // _id index
+ ASSERT_EQUALS(1U, db.getIndexSpecs(ns()).size());
- ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1), true));
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey,
+ dbtests::createIndex(&txn, ns(), BSON("y" << 1), true));
- ASSERT_EQUALS(2, indexCatalog->numIndexesReady(&txn));
- ASSERT_EQUALS(2U, db.getIndexSpecs(ns()).size());
- }
- };
+ ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&txn));
+ ASSERT_EQUALS(1U, db.getIndexSpecs(ns()).size());
- class CS_10 : public Base {
- public:
- CS_10() : Base( "CS_10" ) {}
- void run() {
- OperationContextImpl txn;
- DBDirectClient db(&txn);
+ ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1), true));
- const string longs( 770, 'c' );
- for (int i = 0; i < 1111; ++i) {
- db.insert(ns(), BSON("a" << i << "b" << longs));
- }
+ ASSERT_EQUALS(2, indexCatalog->numIndexesReady(&txn));
+ ASSERT_EQUALS(2U, db.getIndexSpecs(ns()).size());
+ }
+};
- ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON( "a" << 1 << "b" << 1 ) ));
+class CS_10 : public Base {
+public:
+ CS_10() : Base("CS_10") {}
+ void run() {
+ OperationContextImpl txn;
+ DBDirectClient db(&txn);
- auto_ptr< DBClientCursor > c = db.query( ns(), Query().sort( BSON( "a" << 1 << "b" << 1 ) ) );
- ASSERT_EQUALS( 1111, c->itcount() );
+ const string longs(770, 'c');
+ for (int i = 0; i < 1111; ++i) {
+ db.insert(ns(), BSON("a" << i << "b" << longs));
}
- };
-
- class PushBack : public Base {
- public:
- PushBack() : Base( "PushBack" ) {}
- void run() {
- OperationContextImpl txn;
- DBDirectClient db(&txn);
- for (int i = 0; i < 10; ++i) {
- db.insert(ns(), BSON("i" << i));
- }
+ ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("a" << 1 << "b" << 1)));
- auto_ptr< DBClientCursor > c = db.query( ns(), Query().sort( BSON( "i" << 1 ) ) );
+ auto_ptr<DBClientCursor> c = db.query(ns(), Query().sort(BSON("a" << 1 << "b" << 1)));
+ ASSERT_EQUALS(1111, c->itcount());
+ }
+};
- BSONObj o = c->next();
- ASSERT( c->more() );
- ASSERT_EQUALS( 9 , c->objsLeftInBatch() );
- ASSERT( c->moreInCurrentBatch() );
+class PushBack : public Base {
+public:
+ PushBack() : Base("PushBack") {}
+ void run() {
+ OperationContextImpl txn;
+ DBDirectClient db(&txn);
- c->putBack( o );
- ASSERT( c->more() );
- ASSERT_EQUALS( 10, c->objsLeftInBatch() );
- ASSERT( c->moreInCurrentBatch() );
-
- o = c->next();
- BSONObj o2 = c->next();
- BSONObj o3 = c->next();
- c->putBack( o3 );
- c->putBack( o2 );
- c->putBack( o );
- for( int i = 0; i < 10; ++i ) {
- o = c->next();
- ASSERT_EQUALS( i, o[ "i" ].number() );
- }
- ASSERT( !c->more() );
- ASSERT_EQUALS( 0, c->objsLeftInBatch() );
- ASSERT( !c->moreInCurrentBatch() );
-
- c->putBack( o );
- ASSERT( c->more() );
- ASSERT_EQUALS( 1, c->objsLeftInBatch() );
- ASSERT( c->moreInCurrentBatch() );
- ASSERT_EQUALS( 1, c->itcount() );
- }
- };
-
- class Create : public Base {
- public:
- Create() : Base( "Create" ) {}
- void run() {
- OperationContextImpl txn;
- DBDirectClient db(&txn);
-
- db.createCollection("unittests.clienttests.create", 4096, true);
- BSONObj info;
- ASSERT( db.runCommand( "unittests", BSON( "collstats" << "clienttests.create" ), info ) );
+ for (int i = 0; i < 10; ++i) {
+ db.insert(ns(), BSON("i" << i));
}
- };
-
- class ConnectionStringTests {
- public:
- void run() {
- {
- ConnectionString s( "a/b,c,d" , ConnectionString::SET );
- ASSERT_EQUALS( ConnectionString::SET , s.type() );
- ASSERT_EQUALS( "a" , s.getSetName() );
- vector<HostAndPort> v = s.getServers();
- ASSERT_EQUALS( 3U , v.size() );
- ASSERT_EQUALS( "b" , v[0].host() );
- ASSERT_EQUALS( "c" , v[1].host() );
- ASSERT_EQUALS( "d" , v[2].host() );
- }
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "client" ) {
+ auto_ptr<DBClientCursor> c = db.query(ns(), Query().sort(BSON("i" << 1)));
+
+ BSONObj o = c->next();
+ ASSERT(c->more());
+ ASSERT_EQUALS(9, c->objsLeftInBatch());
+ ASSERT(c->moreInCurrentBatch());
+
+ c->putBack(o);
+ ASSERT(c->more());
+ ASSERT_EQUALS(10, c->objsLeftInBatch());
+ ASSERT(c->moreInCurrentBatch());
+
+ o = c->next();
+ BSONObj o2 = c->next();
+ BSONObj o3 = c->next();
+ c->putBack(o3);
+ c->putBack(o2);
+ c->putBack(o);
+ for (int i = 0; i < 10; ++i) {
+ o = c->next();
+ ASSERT_EQUALS(i, o["i"].number());
}
-
- void setupTests() {
- add<DropIndex>();
- add<BuildIndex>();
- add<CS_10>();
- add<PushBack>();
- add<Create>();
- add<ConnectionStringTests>();
+ ASSERT(!c->more());
+ ASSERT_EQUALS(0, c->objsLeftInBatch());
+ ASSERT(!c->moreInCurrentBatch());
+
+ c->putBack(o);
+ ASSERT(c->more());
+ ASSERT_EQUALS(1, c->objsLeftInBatch());
+ ASSERT(c->moreInCurrentBatch());
+ ASSERT_EQUALS(1, c->itcount());
+ }
+};
+
+class Create : public Base {
+public:
+ Create() : Base("Create") {}
+ void run() {
+ OperationContextImpl txn;
+ DBDirectClient db(&txn);
+
+ db.createCollection("unittests.clienttests.create", 4096, true);
+ BSONObj info;
+ ASSERT(db.runCommand("unittests",
+ BSON("collstats"
+ << "clienttests.create"),
+ info));
+ }
+};
+
+class ConnectionStringTests {
+public:
+ void run() {
+ {
+ ConnectionString s("a/b,c,d", ConnectionString::SET);
+ ASSERT_EQUALS(ConnectionString::SET, s.type());
+ ASSERT_EQUALS("a", s.getSetName());
+ vector<HostAndPort> v = s.getServers();
+ ASSERT_EQUALS(3U, v.size());
+ ASSERT_EQUALS("b", v[0].host());
+ ASSERT_EQUALS("c", v[1].host());
+ ASSERT_EQUALS("d", v[2].host());
}
-
- };
-
- SuiteInstance<All> all;
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("client") {}
+
+ void setupTests() {
+ add<DropIndex>();
+ add<BuildIndex>();
+ add<CS_10>();
+ add<PushBack>();
+ add<Create>();
+ add<ConnectionStringTests>();
+ }
+};
+
+SuiteInstance<All> all;
}
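
The PushBack test above is effectively a specification of DBClientCursor::putBack: a document handed back to the cursor is served again before the rest of the batch, and more()/objsLeftInBatch() reflect the returned document. A minimal illustration of that contract over integer "documents" (MiniCursor is a stand-in, not the real cursor class):

#include <cassert>
#include <deque>
#include <vector>

class MiniCursor {
public:
    explicit MiniCursor(const std::vector<int>& docs) : _batch(docs.begin(), docs.end()) {}
    bool more() const {
        return !_batch.empty();
    }
    int objsLeftInBatch() const {
        return static_cast<int>(_batch.size());
    }
    int next() {
        int doc = _batch.front();
        _batch.pop_front();
        return doc;
    }
    void putBack(int doc) {
        _batch.push_front(doc);  // served again before the remaining batch
    }

private:
    std::deque<int> _batch;  // pending documents, front is served next
};

int main() {
    MiniCursor c(std::vector<int>{0, 1, 2, 3});
    int o = c.next();  // consumes document 0
    assert(c.objsLeftInBatch() == 3);
    c.putBack(o);
    assert(c.objsLeftInBatch() == 4);  // the batch grew back
    assert(c.next() == 0);             // and the put-back document comes out first
    return 0;
}
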
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
index f6e87de5885..c10b6bb5940 100644
--- a/src/mongo/dbtests/commandtests.cpp
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -37,235 +37,240 @@ using namespace mongo;
namespace CommandTests {
- using std::string;
-
- // one namespace per command
- namespace FileMD5 {
- struct Base {
- Base() : db(&_txn) {
- db.dropCollection(ns());
- ASSERT_OK(dbtests::createIndex(&_txn, ns(),BSON( "files_id" << 1 << "n" << 1 )));
- }
+using std::string;
+
+// one namespace per command
+namespace FileMD5 {
+struct Base {
+ Base() : db(&_txn) {
+ db.dropCollection(ns());
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("files_id" << 1 << "n" << 1)));
+ }
- const char* ns() { return "test.fs.chunks"; }
-
- OperationContextImpl _txn;
- DBDirectClient db;
- };
- struct Type0 : Base {
- void run() {
- {
- BSONObjBuilder b;
- b.genOID();
- b.append("files_id", 0);
- b.append("n", 0);
- b.appendBinData("data", 6, BinDataGeneral, "hello ");
- db.insert(ns(), b.obj());
- }
- {
- BSONObjBuilder b;
- b.genOID();
- b.append("files_id", 0);
- b.append("n", 1);
- b.appendBinData("data", 5, BinDataGeneral, "world");
- db.insert(ns(), b.obj());
- }
-
- BSONObj result;
- ASSERT( db.runCommand("test", BSON("filemd5" << 0), result) );
- ASSERT_EQUALS( string("5eb63bbbe01eeed093cb22bb8f5acdc3") , result["md5"].valuestr() );
- }
- };
- struct Type2 : Base {
- void run() {
- {
- BSONObjBuilder b;
- b.genOID();
- b.append("files_id", 0);
- b.append("n", 0);
- b.appendBinDataArrayDeprecated("data", "hello ", 6);
- db.insert(ns(), b.obj());
- }
- {
- BSONObjBuilder b;
- b.genOID();
- b.append("files_id", 0);
- b.append("n", 1);
- b.appendBinDataArrayDeprecated("data", "world", 5);
- db.insert(ns(), b.obj());
- }
-
- BSONObj result;
- ASSERT( db.runCommand("test", BSON("filemd5" << 0), result) );
- ASSERT_EQUALS( string("5eb63bbbe01eeed093cb22bb8f5acdc3") , result["md5"].valuestr() );
- }
- };
+ const char* ns() {
+ return "test.fs.chunks";
}
-namespace SymbolArgument {
- // SERVER-16260
- // The Ruby driver expects server commands to accept the Symbol BSON type as a collection name.
- // This is a historical quirk that we shall support until corrected versions of the Ruby driver
- // can be distributed. Retain these tests until MongoDB 3.0
-
- class Base {
- public:
- Base() : db(&_txn) {
- db.dropCollection(ns());
+ OperationContextImpl _txn;
+ DBDirectClient db;
+};
+struct Type0 : Base {
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 0);
+ b.appendBinData("data", 6, BinDataGeneral, "hello ");
+ db.insert(ns(), b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 1);
+ b.appendBinData("data", 5, BinDataGeneral, "world");
+ db.insert(ns(), b.obj());
}
- const char* ns() { return "test.symbolarg"; }
- const char* nsDb() { return "test"; }
- const char* nsColl() { return "symbolarg"; }
-
- OperationContextImpl _txn;
- DBDirectClient db;
- };
-
- class Drop : Base {
- public:
- void run() {
- ASSERT( db.createCollection(ns()) );
- {
- BSONObjBuilder cmd;
- cmd.appendSymbol("drop", nsColl()); // Use Symbol for SERVER-16260
-
- BSONObj result;
- bool ok = db.runCommand(nsDb(), cmd.obj(), result);
- log() << result.jsonString();
- ASSERT(ok);
- }
- db.resetIndexCache();
+ BSONObj result;
+ ASSERT(db.runCommand("test", BSON("filemd5" << 0), result));
+ ASSERT_EQUALS(string("5eb63bbbe01eeed093cb22bb8f5acdc3"), result["md5"].valuestr());
+ }
+};
+struct Type2 : Base {
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 0);
+ b.appendBinDataArrayDeprecated("data", "hello ", 6);
+ db.insert(ns(), b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 1);
+ b.appendBinDataArrayDeprecated("data", "world", 5);
+ db.insert(ns(), b.obj());
}
- };
- class DropIndexes : Base {
- public:
- void run() {
- ASSERT( db.createCollection(ns()) );
+ BSONObj result;
+ ASSERT(db.runCommand("test", BSON("filemd5" << 0), result));
+ ASSERT_EQUALS(string("5eb63bbbe01eeed093cb22bb8f5acdc3"), result["md5"].valuestr());
+ }
+};
+}  // namespace FileMD5
+
+namespace SymbolArgument {
+// SERVER-16260
+// The Ruby driver expects server commands to accept the Symbol BSON type as a collection name.
+// This is a historical quirk that we shall support until corrected versions of the Ruby driver
+// can be distributed. Retain these tests until MongoDB 3.0
+
+class Base {
+public:
+ Base() : db(&_txn) {
+ db.dropCollection(ns());
+ }
+
+ const char* ns() {
+ return "test.symbolarg";
+ }
+ const char* nsDb() {
+ return "test";
+ }
+ const char* nsColl() {
+ return "symbolarg";
+ }
+
+ OperationContextImpl _txn;
+ DBDirectClient db;
+};
+class Drop : Base {
+public:
+ void run() {
+ ASSERT(db.createCollection(ns()));
+ {
BSONObjBuilder cmd;
- cmd.appendSymbol("dropIndexes", nsColl()); // Use Symbol for SERVER-16260
- cmd.append("index", "*");
+ cmd.appendSymbol("drop", nsColl()); // Use Symbol for SERVER-16260
BSONObj result;
bool ok = db.runCommand(nsDb(), cmd.obj(), result);
log() << result.jsonString();
ASSERT(ok);
}
- };
-
- class FindAndModify : Base {
- public:
- void run() {
- ASSERT( db.createCollection(ns()) );
- {
- BSONObjBuilder b;
- b.genOID();
- b.append("name", "Tom");
- b.append("rating", 0);
- db.insert(ns(), b.obj());
+ db.resetIndexCache();
+ }
+};
+
+class DropIndexes : Base {
+public:
+ void run() {
+ ASSERT(db.createCollection(ns()));
+
+ BSONObjBuilder cmd;
+ cmd.appendSymbol("dropIndexes", nsColl()); // Use Symbol for SERVER-16260
+ cmd.append("index", "*");
+
+ BSONObj result;
+ bool ok = db.runCommand(nsDb(), cmd.obj(), result);
+ log() << result.jsonString();
+ ASSERT(ok);
+ }
+};
+
+class FindAndModify : Base {
+public:
+ void run() {
+ ASSERT(db.createCollection(ns()));
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("name", "Tom");
+ b.append("rating", 0);
+ db.insert(ns(), b.obj());
+ }
+
+ BSONObjBuilder cmd;
+ cmd.appendSymbol("findAndModify", nsColl()); // Use Symbol for SERVER-16260
+ cmd.append("update", BSON("$inc" << BSON("score" << 1)));
+ cmd.append("new", true);
+
+ BSONObj result;
+ bool ok = db.runCommand(nsDb(), cmd.obj(), result);
+ log() << result.jsonString();
+ ASSERT(ok);
+ // TODO(kangas) test that Tom's score is 1
+ }
+};
+
+class GeoSearch : Base {
+public:
+ void run() {
+ // Subset of geo_haystack1.js
+
+ int n = 0;
+ for (int x = 0; x < 20; x++) {
+ for (int y = 0; y < 20; y++) {
+ db.insert(ns(), BSON("_id" << n << "loc" << BSON_ARRAY(x << y) << "z" << n % 5));
+ n++;
}
+ }
+
+        // Build geoHaystack index. Can't use db.ensureIndex, no way to pass "bucketSize".
+ // So run createIndexes command instead.
+ //
+ // Shell example:
+ // t.ensureIndex( { loc : "geoHaystack" , z : 1 }, { bucketSize : .7 } );
+ {
BSONObjBuilder cmd;
- cmd.appendSymbol("findAndModify", nsColl()); // Use Symbol for SERVER-16260
- cmd.append("update", BSON("$inc" << BSON("score" << 1)) );
- cmd.append("new", true);
+ cmd.append("createIndexes", nsColl());
+ cmd.append("indexes",
+ BSON_ARRAY(BSON("key" << BSON("loc"
+ << "geoHaystack"
+ << "z" << 1.0) << "name"
+ << "loc_geoHaystack_z_1"
+ << "bucketSize" << static_cast<double>(0.7))));
+
+ BSONObj result;
+ ASSERT(db.runCommand(nsDb(), cmd.obj(), result));
+ }
+
+ {
+ BSONObjBuilder cmd;
+ cmd.appendSymbol("geoSearch", nsColl()); // Use Symbol for SERVER-16260
+ cmd.append("near", BSON_ARRAY(7 << 8));
+ cmd.append("maxDistance", 3);
+ cmd.append("search", BSON("z" << 3));
BSONObj result;
bool ok = db.runCommand(nsDb(), cmd.obj(), result);
log() << result.jsonString();
ASSERT(ok);
- // TODO(kangas) test that Tom's score is 1
}
- };
-
- class GeoSearch : Base {
- public:
- void run() {
- // Subset of geo_haystack1.js
-
- int n = 0;
- for (int x = 0; x < 20; x++) {
- for (int y = 0; y < 20; y++) {
- db.insert(ns(), BSON("_id" << n <<
- "loc" << BSON_ARRAY(x << y) <<
- "z" << n % 5));
- n++;
- }
- }
+ }
+};
- // Build geoHaystack index. Can's use db.ensureIndex, no way to pass "bucketSize".
- // So run createIndexes command instead.
- //
- // Shell example:
- // t.ensureIndex( { loc : "geoHaystack" , z : 1 }, { bucketSize : .7 } );
-
- {
- BSONObjBuilder cmd;
- cmd.append("createIndexes", nsColl());
- cmd.append("indexes", BSON_ARRAY(
- BSON("key" << BSON("loc" << "geoHaystack" << "z" << 1.0) <<
- "name" << "loc_geoHaystack_z_1" <<
- "bucketSize" << static_cast<double>(0.7))
- ));
-
- BSONObj result;
- ASSERT( db.runCommand(nsDb(), cmd.obj(), result) );
- }
+class Touch : Base {
+public:
+ void run() {
+ ASSERT(db.createCollection(ns()));
+ {
+ BSONObjBuilder cmd;
+ cmd.appendSymbol("touch", nsColl()); // Use Symbol for SERVER-16260
+ cmd.append("data", true);
+ cmd.append("index", true);
- {
- BSONObjBuilder cmd;
- cmd.appendSymbol("geoSearch", nsColl()); // Use Symbol for SERVER-16260
- cmd.append("near", BSON_ARRAY(7 << 8));
- cmd.append("maxDistance", 3);
- cmd.append("search", BSON("z" << 3));
-
- BSONObj result;
- bool ok = db.runCommand(nsDb(), cmd.obj(), result);
- log() << result.jsonString();
- ASSERT(ok);
- }
- }
- };
-
- class Touch : Base {
- public:
- void run() {
- ASSERT( db.createCollection(ns()) );
- {
- BSONObjBuilder cmd;
- cmd.appendSymbol("touch", nsColl()); // Use Symbol for SERVER-16260
- cmd.append("data", true);
- cmd.append("index", true);
-
- BSONObj result;
- bool ok = db.runCommand(nsDb(), cmd.obj(), result);
- log() << result.jsonString();
- ASSERT(ok || result["code"].Int() == ErrorCodes::CommandNotSupported);
- }
+ BSONObj result;
+ bool ok = db.runCommand(nsDb(), cmd.obj(), result);
+ log() << result.jsonString();
+ ASSERT(ok || result["code"].Int() == ErrorCodes::CommandNotSupported);
}
- };
+ }
+};
} // SymbolArgument
- class All : public Suite {
- public:
- All() : Suite( "commands" ) {
- }
-
- void setupTests() {
- add< FileMD5::Type0 >();
- add< FileMD5::Type2 >();
- add< FileMD5::Type2 >();
- add< SymbolArgument::DropIndexes >();
- add< SymbolArgument::FindAndModify >();
- add< SymbolArgument::Touch >();
- add< SymbolArgument::Drop >();
- add< SymbolArgument::GeoSearch >();
- }
-
- };
+class All : public Suite {
+public:
+ All() : Suite("commands") {}
+
+ void setupTests() {
+ add<FileMD5::Type0>();
+ add<FileMD5::Type2>();
+ add<SymbolArgument::DropIndexes>();
+ add<SymbolArgument::FindAndModify>();
+ add<SymbolArgument::Touch>();
+ add<SymbolArgument::Drop>();
+ add<SymbolArgument::GeoSearch>();
+ }
+};
- SuiteInstance<All> all;
+SuiteInstance<All> all;
}
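
For readers skimming the SymbolArgument suite above, here is a minimal sketch of the pattern under test (SERVER-16260): the command-name element is appended as a BSON Symbol rather than a String, and the server is expected to accept it anyway. The client handle `db` and the names are illustrative, as in the tests.

    // Sketch only: issue { drop: <collection> } with a Symbol-typed name.
    BSONObjBuilder cmd;
    cmd.appendSymbol("drop", "mycoll");   // Symbol instead of String
    BSONObj result;
    bool ok = db.runCommand("mydb", cmd.obj(), result);
    ASSERT(ok);
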
diff --git a/src/mongo/dbtests/config_server_fixture.cpp b/src/mongo/dbtests/config_server_fixture.cpp
index c92f521d433..ed7bbeaaa2a 100644
--- a/src/mongo/dbtests/config_server_fixture.cpp
+++ b/src/mongo/dbtests/config_server_fixture.cpp
@@ -47,106 +47,100 @@
namespace mongo {
- using boost::scoped_ptr;
- using std::endl;
- using std::list;
- using std::string;
-
- ConfigServerFixture::ConfigServerFixture()
- : _client(&_txn),
- _connectHook(NULL) {
-
- }
-
- void ConfigServerFixture::setUp() {
- DBException::traceExceptions = true;
-
- // Make all connections redirect to the direct client
- _connectHook = new CustomConnectHook(&_txn);
- ConnectionString::setConnectionHook(_connectHook);
- // Disable the lock pinger
- setLockPingerEnabled(false);
-
- // Create the default config database before querying, necessary for direct connections
- clearServer();
- _client.insert("config.test", BSON( "hello" << "world" ));
- _client.dropCollection("config.test");
-
- // Create an index over the chunks, to allow correct diffing
- ASSERT_OK(dbtests::createIndex(&_txn,
- ChunkType::ConfigNS,
- BSON( ChunkType::ns() << 1 <<
- ChunkType::DEPRECATED_lastmod() << 1 )));
- configServer.init(configSvr().toString());
- }
-
- void ConfigServerFixture::clearServer() {
- _client.dropDatabase("config");
- }
+using boost::scoped_ptr;
+using std::endl;
+using std::list;
+using std::string;
+
+ConfigServerFixture::ConfigServerFixture() : _client(&_txn), _connectHook(NULL) {}
+
+void ConfigServerFixture::setUp() {
+ DBException::traceExceptions = true;
+
+ // Make all connections redirect to the direct client
+ _connectHook = new CustomConnectHook(&_txn);
+ ConnectionString::setConnectionHook(_connectHook);
+ // Disable the lock pinger
+ setLockPingerEnabled(false);
+
+ // Create the default config database before querying, necessary for direct connections
+ clearServer();
+ _client.insert("config.test",
+ BSON("hello"
+ << "world"));
+ _client.dropCollection("config.test");
+
+ // Create an index over the chunks, to allow correct diffing
+ ASSERT_OK(
+ dbtests::createIndex(&_txn,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << 1 << ChunkType::DEPRECATED_lastmod() << 1)));
+ configServer.init(configSvr().toString());
+}
- void ConfigServerFixture::clearVersion() {
- _client.dropCollection(VersionType::ConfigNS);
- }
+void ConfigServerFixture::clearServer() {
+ _client.dropDatabase("config");
+}
- void ConfigServerFixture::clearShards() {
- _client.dropCollection(ShardType::ConfigNS);
- }
+void ConfigServerFixture::clearVersion() {
+ _client.dropCollection(VersionType::ConfigNS);
+}
- void ConfigServerFixture::clearDatabases() {
- _client.dropCollection(DatabaseType::ConfigNS);
- }
+void ConfigServerFixture::clearShards() {
+ _client.dropCollection(ShardType::ConfigNS);
+}
- void ConfigServerFixture::clearCollections() {
- _client.dropCollection(CollectionType::ConfigNS);
- }
+void ConfigServerFixture::clearDatabases() {
+ _client.dropCollection(DatabaseType::ConfigNS);
+}
- void ConfigServerFixture::clearChunks() {
- _client.dropCollection(ChunkType::ConfigNS);
- }
+void ConfigServerFixture::clearCollections() {
+ _client.dropCollection(CollectionType::ConfigNS);
+}
- void ConfigServerFixture::clearPings() {
- _client.dropCollection(MongosType::ConfigNS);
- }
+void ConfigServerFixture::clearChunks() {
+ _client.dropCollection(ChunkType::ConfigNS);
+}
- void ConfigServerFixture::clearChangelog() {
- _client.dropCollection(ChangelogType::ConfigNS);
- }
+void ConfigServerFixture::clearPings() {
+ _client.dropCollection(MongosType::ConfigNS);
+}
- void ConfigServerFixture::dumpServer() {
+void ConfigServerFixture::clearChangelog() {
+ _client.dropCollection(ChangelogType::ConfigNS);
+}
- log() << "Dumping virtual config server to log..." << endl;
+void ConfigServerFixture::dumpServer() {
+ log() << "Dumping virtual config server to log..." << endl;
- list<string> collectionNames(_client.getCollectionNames("config"));
+ list<string> collectionNames(_client.getCollectionNames("config"));
- for (list<string>::iterator it = collectionNames.begin(); it != collectionNames.end(); ++it)
- {
- const string& collection = *it;
+ for (list<string>::iterator it = collectionNames.begin(); it != collectionNames.end(); ++it) {
+ const string& collection = *it;
- scoped_ptr<DBClientCursor> cursor(_client.query(collection, BSONObj()).release());
- ASSERT(cursor.get() != NULL);
+ scoped_ptr<DBClientCursor> cursor(_client.query(collection, BSONObj()).release());
+ ASSERT(cursor.get() != NULL);
- log() << "Dumping collection " << collection << endl;
+ log() << "Dumping collection " << collection << endl;
- while (cursor->more()) {
- BSONObj obj = cursor->nextSafe();
- log() << obj.toString() << endl;
- }
+ while (cursor->more()) {
+ BSONObj obj = cursor->nextSafe();
+ log() << obj.toString() << endl;
}
}
+}
- void ConfigServerFixture::tearDown() {
-
- clearServer();
-
- // Reset the pinger
- setLockPingerEnabled(true);
+void ConfigServerFixture::tearDown() {
+ clearServer();
- // Make all connections redirect to the direct client
- ConnectionString::setConnectionHook(NULL);
- delete _connectHook;
- _connectHook = NULL;
+ // Reset the pinger
+ setLockPingerEnabled(true);
- DBException::traceExceptions = false;
- }
+ // Make all connections redirect to the direct client
+ ConnectionString::setConnectionHook(NULL);
+ delete _connectHook;
+ _connectHook = NULL;
+ DBException::traceExceptions = false;
+}
}
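
The setUp()/tearDown() pair above turns on a process-global redirect; condensed, the hook lifecycle looks like this (sketch, with `txn` an OperationContextImpl as in the fixture):

    // Route every ConnectionString::connect() through the direct client
    // for the duration of a test, then restore default behavior.
    CustomConnectHook* hook = new CustomConnectHook(&txn);
    ConnectionString::setConnectionHook(hook);   // install globally
    // ... test body performs "remote" operations here ...
    ConnectionString::setConnectionHook(NULL);   // uninstall first
    delete hook;                                 // then free the hook
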
diff --git a/src/mongo/dbtests/config_server_fixture.h b/src/mongo/dbtests/config_server_fixture.h
index f8a7f7838e0..5878df8919e 100644
--- a/src/mongo/dbtests/config_server_fixture.h
+++ b/src/mongo/dbtests/config_server_fixture.h
@@ -37,102 +37,98 @@
namespace mongo {
- class CustomDirectClient: public DBDirectClient {
- public:
- CustomDirectClient(OperationContext* txn) : DBDirectClient(txn) {
- setWireVersions(minWireVersion, maxWireVersion);
- }
-
- virtual ConnectionString::ConnectionType type() const {
- return ConnectionString::CUSTOM;
- }
-
- virtual bool recv( Message& m ) {
- // This is tailored to act as a dummy response for write commands.
-
- BufBuilder bb;
- bb.skip(sizeof(QueryResult::Value));
-
- BSONObj cmdResult(BSON("ok" << 1));
-
- bb.appendBuf(cmdResult.objdata(), cmdResult.objsize());
-
- QueryResult::View qr = bb.buf();
- bb.decouple();
- qr.setResultFlagsToOk();
- qr.msgdata().setLen(bb.len());
- qr.msgdata().setOperation(opReply);
- qr.setCursorId(0);
- qr.setStartingFrom(0);
- qr.setNReturned(1);
- m.setData(qr.view2ptr(), true);
-
- return true;
- }
- };
-
- class CustomConnectHook : public ConnectionString::ConnectionHook {
- public:
- CustomConnectHook(OperationContext* txn) : _txn(txn) { }
-
- virtual DBClientBase* connect(const ConnectionString& connStr,
- std::string& errmsg,
- double socketTimeout)
- {
- // Note - must be new, since it gets owned elsewhere
- return new CustomDirectClient(_txn);
- }
-
- private:
- OperationContext* const _txn;
- };
+class CustomDirectClient : public DBDirectClient {
+public:
+ CustomDirectClient(OperationContext* txn) : DBDirectClient(txn) {
+ setWireVersions(minWireVersion, maxWireVersion);
+ }
+
+ virtual ConnectionString::ConnectionType type() const {
+ return ConnectionString::CUSTOM;
+ }
+
+ virtual bool recv(Message& m) {
+ // This is tailored to act as a dummy response for write commands.
+
+ BufBuilder bb;
+ bb.skip(sizeof(QueryResult::Value));
+
+ BSONObj cmdResult(BSON("ok" << 1));
+
+ bb.appendBuf(cmdResult.objdata(), cmdResult.objsize());
+
+ QueryResult::View qr = bb.buf();
+ bb.decouple();
+ qr.setResultFlagsToOk();
+ qr.msgdata().setLen(bb.len());
+ qr.msgdata().setOperation(opReply);
+ qr.setCursorId(0);
+ qr.setStartingFrom(0);
+ qr.setNReturned(1);
+ m.setData(qr.view2ptr(), true);
+
+ return true;
+ }
+};
+
+class CustomConnectHook : public ConnectionString::ConnectionHook {
+public:
+ CustomConnectHook(OperationContext* txn) : _txn(txn) {}
+
+ virtual DBClientBase* connect(const ConnectionString& connStr,
+ std::string& errmsg,
+ double socketTimeout) {
+ // Note - must be new, since it gets owned elsewhere
+ return new CustomDirectClient(_txn);
+ }
+
+private:
+ OperationContext* const _txn;
+};
+
+/**
+ * Fixture for testing complicated operations against a "virtual" config server.
+ *
+ * Use this if your test requires complex commands and writing to many collections,
+ * otherwise a unit test in the mock framework may be a better option.
+ */
+class ConfigServerFixture : public mongo::unittest::Test {
+public:
+ ConfigServerFixture();
/**
- * Fixture for testing complicated operations against a "virtual" config server.
- *
- * Use this if your test requires complex commands and writing to many collections,
- * otherwise a unit test in the mock framework may be a better option.
+ * Returns a connection string to the virtual config server.
*/
- class ConfigServerFixture: public mongo::unittest::Test {
- public:
-
- ConfigServerFixture();
-
- /**
- * Returns a connection std::string to the virtual config server.
- */
- ConnectionString configSvr() const {
- return ConnectionString(HostAndPort("$dummy:10000"));
- }
+ ConnectionString configSvr() const {
+ return ConnectionString(HostAndPort("$dummy:10000"));
+ }
- /**
- * Clears all data on the server
- */
- void clearServer();
-
- void clearVersion();
- void clearShards();
- void clearDatabases();
- void clearCollections();
- void clearChunks();
- void clearPings();
- void clearChangelog();
-
- /**
- * Dumps the contents of the config server to the log.
- */
- void dumpServer();
+ /**
+ * Clears all data on the server
+ */
+ void clearServer();
- protected:
+ void clearVersion();
+ void clearShards();
+ void clearDatabases();
+ void clearCollections();
+ void clearChunks();
+ void clearPings();
+ void clearChangelog();
- virtual void setUp();
+ /**
+ * Dumps the contents of the config server to the log.
+ */
+ void dumpServer();
- virtual void tearDown();
+protected:
+ virtual void setUp();
+ virtual void tearDown();
- OperationContextImpl _txn;
- CustomDirectClient _client;
- CustomConnectHook* _connectHook;
- };
+ OperationContextImpl _txn;
+ CustomDirectClient _client;
+ CustomConnectHook* _connectHook;
+};
}
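
Since ConfigServerFixture derives from mongo::unittest::Test, it slots into TEST_F directly; a hypothetical usage (the test and collection names are illustrative, not from the source):

    class MyConfigTest : public ConfigServerFixture {};

    TEST_F(MyConfigTest, StartsEmpty) {
        clearServer();  // drop the virtual config database
        ASSERT_EQUALS(0ULL, _client.count("config.chunks"));
    }
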
diff --git a/src/mongo/dbtests/config_upgrade_tests.cpp b/src/mongo/dbtests/config_upgrade_tests.cpp
index 841ce48343e..df0cc4c6e84 100644
--- a/src/mongo/dbtests/config_upgrade_tests.cpp
+++ b/src/mongo/dbtests/config_upgrade_tests.cpp
@@ -41,238 +41,221 @@
namespace mongo {
- using std::string;
+using std::string;
+
+/**
+ * Specialization of the config server fixture with helpers for the tests below.
+ */
+class ConfigUpgradeFixture : public ConfigServerFixture {
+public:
+ ConfigUpgradeFixture() : ConfigServerFixture() {}
+
+ void stopBalancer() {
+ // Note: The balancer key is needed in the update portion, for some reason related to
+ // DBDirectClient
+ DBDirectClient client(&_txn);
+ client.update(SettingsType::ConfigNS,
+ BSON(SettingsType::key("balancer")),
+ BSON(SettingsType::key("balancer") << SettingsType::balancerStopped(true)),
+ true,
+ false);
+ }
/**
- * Specialization of the config server fixture with helpers for the tests below.
+ * Stores a legacy { version : X } config server entry
*/
- class ConfigUpgradeFixture: public ConfigServerFixture {
- public:
-
- ConfigUpgradeFixture() : ConfigServerFixture() {
-
+ void storeLegacyConfigVersion(int version) {
+ if (version == 0)
+ return;
+
+ DBDirectClient client(&_txn);
+
+ if (version == 1) {
+ ShardType shard;
+ shard.setName("test");
+ shard.setHost("$dummy:10000");
+ client.insert(ShardType::ConfigNS, shard.toBSON());
+ return;
}
- void stopBalancer() {
- // Note: The balancer key is needed in the update portion, for some reason related to
- // DBDirectClient
- DBDirectClient client(&_txn);
- client.update(SettingsType::ConfigNS,
- BSON(SettingsType::key("balancer")),
- BSON(SettingsType::key("balancer") << SettingsType::balancerStopped(true)),
- true, false);
- }
-
- /**
- * Stores a legacy { version : X } config server entry
- */
- void storeLegacyConfigVersion(int version) {
-
- if (version == 0) return;
-
- DBDirectClient client(&_txn);
+ client.insert(VersionType::ConfigNS, BSON("_id" << 1 << "version" << version));
+ }
- if (version == 1) {
- ShardType shard;
- shard.setName("test");
- shard.setHost("$dummy:10000");
- client.insert(ShardType::ConfigNS, shard.toBSON());
- return;
- }
+ /**
+ * Stores a newer { version, minVersion, currentVersion, clusterId } config server entry
+ */
+ void storeConfigVersion(const VersionType& versionInfo) {
+ DBDirectClient client(&_txn);
+ client.insert(VersionType::ConfigNS, versionInfo.toBSON());
+ }
- client.insert(VersionType::ConfigNS, BSON("_id" << 1 << "version" << version));
+ /**
+ * Stores a newer { version, minVersion, currentVersion, clusterId } config server entry.
+ *
+ * @return clusterId
+ */
+ OID storeConfigVersion(int configVersion) {
+ if (configVersion < CURRENT_CONFIG_VERSION) {
+ storeLegacyConfigVersion(configVersion);
+ return OID();
}
- /**
- * Stores a newer { version, minVersion, currentVersion, clusterId } config server entry
- */
- void storeConfigVersion(const VersionType& versionInfo) {
- DBDirectClient client(&_txn);
- client.insert(VersionType::ConfigNS, versionInfo.toBSON());
- }
+ VersionType version;
+ version.setMinCompatibleVersion(configVersion);
+ version.setCurrentVersion(configVersion);
- /**
- * Stores a newer { version, minVersion, currentVersion, clusterId } config server entry.
- *
- * @return clusterId
- */
- OID storeConfigVersion(int configVersion) {
+ OID clusterId = OID::gen();
- if (configVersion < CURRENT_CONFIG_VERSION) {
- storeLegacyConfigVersion(configVersion);
- return OID();
- }
+ version.setClusterId(clusterId);
- VersionType version;
- version.setMinCompatibleVersion(configVersion);
- version.setCurrentVersion(configVersion);
+ storeConfigVersion(version);
+ return clusterId;
+ }
- OID clusterId = OID::gen();
+ /**
+ * Stores sample shard and ping information at the current version.
+ */
+ void storeShardsAndPings(int numShards, int numPings) {
+ DBDirectClient client(&_txn);
- version.setClusterId(clusterId);
+ for (int i = 0; i < numShards; i++) {
+ ShardType shard;
+ shard.setName(OID::gen().toString());
+ shard.setHost((string)(str::stream() << "$dummyShard:" << (i + 1) << "0000"));
- storeConfigVersion(version);
- return clusterId;
+ client.insert(ShardType::ConfigNS, shard.toBSON());
}
- /**
- * Stores sample shard and ping information at the current version.
- */
- void storeShardsAndPings(int numShards, int numPings) {
- DBDirectClient client(&_txn);
-
- for (int i = 0; i < numShards; i++) {
- ShardType shard;
- shard.setName(OID::gen().toString());
- shard.setHost((string) (str::stream() << "$dummyShard:" << (i + 1) << "0000"));
+ for (int i = 0; i < numPings; i++) {
+ MongosType ping;
+ ping.setName((string)(str::stream() << "$dummyMongos:" << (i + 1) << "0000"));
+ ping.setPing(jsTime());
+ ping.setMongoVersion(versionString);
+ ping.setConfigVersion(CURRENT_CONFIG_VERSION);
- client.insert(ShardType::ConfigNS, shard.toBSON());
+ if (i % 2 == 0) {
+ ping.setPing(ping.getPing() - 10 * 60 * 1000);
}
- for (int i = 0; i < numPings; i++) {
-
- MongosType ping;
- ping.setName((string) (str::stream() << "$dummyMongos:" << (i + 1) << "0000"));
- ping.setPing(jsTime());
- ping.setMongoVersion(versionString);
- ping.setConfigVersion(CURRENT_CONFIG_VERSION);
+ client.insert(MongosType::ConfigNS, ping.toBSON());
+ }
+ }
+};
- if (i % 2 == 0) {
- ping.setPing(ping.getPing() - 10 * 60 * 1000);
- }
+//
+// Tests for upgrading the config server between versions.
+//
+// In general these tests do pretty minimal validation of the config server data itself, but
+// do ensure that the upgrade mechanism is working correctly w.r.t the config.version
+// collection.
+//
- client.insert(MongosType::ConfigNS, ping.toBSON());
- }
- }
- };
+// Rename the fixture so that our tests have a useful name in the executable
+typedef ConfigUpgradeFixture ConfigUpgradeTests;
+TEST_F(ConfigUpgradeTests, EmptyVersion) {
//
- // Tests for upgrading the config server between versions.
+ // Tests detection of empty config version
//
- // In general these tests do pretty minimal validation of the config server data itself, but
- // do ensure that the upgrade mechanism is working correctly w.r.t the config.version
- // collection.
- //
-
- // Rename the fixture so that our tests have a useful name in the executable
- typedef ConfigUpgradeFixture ConfigUpgradeTests;
-
- TEST_F(ConfigUpgradeTests, EmptyVersion) {
- //
- // Tests detection of empty config version
- //
+ // Zero version (no version doc)
+ VersionType oldVersion;
+ Status status = getConfigVersion(configSvr(), &oldVersion);
+ ASSERT(status.isOK());
- // Zero version (no version doc)
- VersionType oldVersion;
- Status status = getConfigVersion(configSvr(), &oldVersion);
- ASSERT(status.isOK());
+ ASSERT_EQUALS(oldVersion.getMinCompatibleVersion(), 0);
+ ASSERT_EQUALS(oldVersion.getCurrentVersion(), 0);
+}
- ASSERT_EQUALS(oldVersion.getMinCompatibleVersion(), 0);
- ASSERT_EQUALS(oldVersion.getCurrentVersion(), 0);
- }
-
- TEST_F(ConfigUpgradeTests, ClusterIDVersion) {
-
- //
- // Tests detection of newer config versions
- //
-
- VersionType newVersion;
- newVersion.setMinCompatibleVersion(MIN_COMPATIBLE_CONFIG_VERSION);
- newVersion.setCurrentVersion(CURRENT_CONFIG_VERSION);
- storeConfigVersion(newVersion);
+TEST_F(ConfigUpgradeTests, ClusterIDVersion) {
+ //
+ // Tests detection of newer config versions
+ //
- newVersion.clear();
+ VersionType newVersion;
+ newVersion.setMinCompatibleVersion(MIN_COMPATIBLE_CONFIG_VERSION);
+ newVersion.setCurrentVersion(CURRENT_CONFIG_VERSION);
+ storeConfigVersion(newVersion);
- // Current Version w/o clusterId (invalid!)
- Status status = getConfigVersion(configSvr(), &newVersion);
- ASSERT(!status.isOK());
+ newVersion.clear();
- newVersion.clear();
+ // Current Version w/o clusterId (invalid!)
+ Status status = getConfigVersion(configSvr(), &newVersion);
+ ASSERT(!status.isOK());
- OID clusterId = OID::gen();
- newVersion.setClusterId(clusterId);
- newVersion.setMinCompatibleVersion(MIN_COMPATIBLE_CONFIG_VERSION);
- newVersion.setCurrentVersion(CURRENT_CONFIG_VERSION);
+ newVersion.clear();
- clearVersion();
- storeConfigVersion(newVersion);
+ OID clusterId = OID::gen();
+ newVersion.setClusterId(clusterId);
+ newVersion.setMinCompatibleVersion(MIN_COMPATIBLE_CONFIG_VERSION);
+ newVersion.setCurrentVersion(CURRENT_CONFIG_VERSION);
- newVersion.clear();
+ clearVersion();
+ storeConfigVersion(newVersion);
- // Current version w/ clusterId (valid!)
- status = getConfigVersion(configSvr(), &newVersion);
- ASSERT(status.isOK());
+ newVersion.clear();
- ASSERT_EQUALS(newVersion.getMinCompatibleVersion(), MIN_COMPATIBLE_CONFIG_VERSION);
- ASSERT_EQUALS(newVersion.getCurrentVersion(), CURRENT_CONFIG_VERSION);
- ASSERT_EQUALS(newVersion.getClusterId(), clusterId);
- }
+ // Current version w/ clusterId (valid!)
+ status = getConfigVersion(configSvr(), &newVersion);
+ ASSERT(status.isOK());
- TEST_F(ConfigUpgradeTests, InitialUpgrade) {
+ ASSERT_EQUALS(newVersion.getMinCompatibleVersion(), MIN_COMPATIBLE_CONFIG_VERSION);
+ ASSERT_EQUALS(newVersion.getCurrentVersion(), CURRENT_CONFIG_VERSION);
+ ASSERT_EQUALS(newVersion.getClusterId(), clusterId);
+}
- //
- // Tests initializing the config server to the initial version
- //
+TEST_F(ConfigUpgradeTests, InitialUpgrade) {
+ //
+ // Tests initializing the config server to the initial version
+ //
- // Empty version
- VersionType versionOld;
- VersionType version;
- string errMsg;
- bool result = checkAndUpgradeConfigVersion(configSvr(),
- false,
- &versionOld,
- &version,
- &errMsg);
-
- ASSERT(result);
- ASSERT_EQUALS(versionOld.getCurrentVersion(), 0);
- ASSERT_EQUALS(version.getMinCompatibleVersion(), MIN_COMPATIBLE_CONFIG_VERSION);
- ASSERT_EQUALS(version.getCurrentVersion(), CURRENT_CONFIG_VERSION);
- ASSERT_NOT_EQUALS(version.getClusterId(), OID());
- }
+ // Empty version
+ VersionType versionOld;
+ VersionType version;
+ string errMsg;
+ bool result = checkAndUpgradeConfigVersion(configSvr(), false, &versionOld, &version, &errMsg);
- TEST_F(ConfigUpgradeTests, BadVersionUpgrade) {
+ ASSERT(result);
+ ASSERT_EQUALS(versionOld.getCurrentVersion(), 0);
+ ASSERT_EQUALS(version.getMinCompatibleVersion(), MIN_COMPATIBLE_CONFIG_VERSION);
+ ASSERT_EQUALS(version.getCurrentVersion(), CURRENT_CONFIG_VERSION);
+ ASSERT_NOT_EQUALS(version.getClusterId(), OID());
+}
- //
- // Tests that we can't upgrade from a config version we don't have an upgrade path for
- //
+TEST_F(ConfigUpgradeTests, BadVersionUpgrade) {
+ //
+ // Tests that we can't upgrade from a config version we don't have an upgrade path for
+ //
- stopBalancer();
+ stopBalancer();
- storeLegacyConfigVersion(1);
+ storeLegacyConfigVersion(1);
- // Default version (not upgradeable)
- VersionType versionOld;
- VersionType version;
- string errMsg;
- bool result = checkAndUpgradeConfigVersion(configSvr(),
- false,
- &versionOld,
- &version,
- &errMsg);
-
- ASSERT(!result);
- }
+ // Default version (not upgradeable)
+ VersionType versionOld;
+ VersionType version;
+ string errMsg;
+ bool result = checkAndUpgradeConfigVersion(configSvr(), false, &versionOld, &version, &errMsg);
- TEST_F(ConfigUpgradeTests, CheckMongoVersion) {
+ ASSERT(!result);
+}
- //
- // Tests basic detection of existing mongos and mongod versions from mongos ping
- // and shard info. Fuller tests require conns to multiple version mongos processes, not
- // done here.
- //
+TEST_F(ConfigUpgradeTests, CheckMongoVersion) {
+ //
+ // Tests basic detection of existing mongos and mongod versions from mongos ping
+ // and shard info. Fuller tests require connections to mongos processes of multiple
+ // versions; that is not done here.
+ //
- storeShardsAndPings(5, 10); // 5 shards, 10 pings
+ storeShardsAndPings(5, 10); // 5 shards, 10 pings
- // Our version is >= 2.2, so this works
- Status status = checkClusterMongoVersions(configSvr(), "2.2");
- ASSERT(status.isOK());
+ // Our version is >= 2.2, so this works
+ Status status = checkClusterMongoVersions(configSvr(), "2.2");
+ ASSERT(status.isOK());
- // Our version is < 9.9, so this doesn't work (until we hit v99.99)
- status = checkClusterMongoVersions(configSvr(), "99.99");
- ASSERT(status.code() == ErrorCodes::RemoteValidationError);
- }
+ // Our version is < 99.99, so this doesn't work (until we hit v99.99)
+ status = checkClusterMongoVersions(configSvr(), "99.99");
+ ASSERT(status.code() == ErrorCodes::RemoteValidationError);
+}
-} // end namespace
+} // end namespace
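
For orientation, the two config.version document shapes these tests store, sketched with illustrative values; the authoritative field names come from VersionType::toBSON(), so treat these spellings as assumptions:

    // Legacy entry, as written by storeLegacyConfigVersion(3):
    BSONObj legacy = BSON("_id" << 1 << "version" << 3);

    // Newer entry, as written via storeConfigVersion(const VersionType&);
    // field names assumed:
    BSONObj current = BSON("_id" << 1
                           << "minCompatibleVersion" << MIN_COMPATIBLE_CONFIG_VERSION
                           << "currentVersion" << CURRENT_CONFIG_VERSION
                           << "clusterId" << OID::gen());
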
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 918b87ed3d7..006f0b0faaf 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -41,139 +41,133 @@
namespace CountTests {
- class Base {
- public:
- Base() : _txn(),
- _scopedXact(&_txn, MODE_IX),
- _lk(_txn.lockState(),
- nsToDatabaseSubstring(ns()), MODE_X),
- _context(&_txn, ns()),
- _client(&_txn) {
-
- _database = _context.db();
-
- {
- WriteUnitOfWork wunit(&_txn);
- _collection = _database->getCollection( ns() );
- if ( _collection ) {
- _database->dropCollection( &_txn, ns() );
- }
- _collection = _database->createCollection( &_txn, ns() );
- wunit.commit();
- }
-
- addIndex( fromjson( "{\"a\":1}" ) );
- }
- ~Base() {
- try {
- WriteUnitOfWork wunit(&_txn);
- uassertStatusOK( _database->dropCollection( &_txn, ns() ) );
- wunit.commit();
- }
- catch ( ... ) {
- FAIL( "Exception while cleaning up collection" );
- }
- }
-
- protected:
-
- static const char *ns() {
- return "unittests.counttests";
- }
-
- void addIndex( const BSONObj &key ) {
- Helpers::ensureIndex(&_txn,
- _collection,
- key,
- /*unique=*/ false,
- /*name=*/ key.firstElementFieldName());
- }
-
- void insert( const char *s ) {
+class Base {
+public:
+ Base()
+ : _txn(),
+ _scopedXact(&_txn, MODE_IX),
+ _lk(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
+ _context(&_txn, ns()),
+ _client(&_txn) {
+ _database = _context.db();
+
+ {
WriteUnitOfWork wunit(&_txn);
- const BSONObj o = fromjson(s);
-
- if ( o["_id"].eoo() ) {
- BSONObjBuilder b;
- OID oid;
- oid.init();
- b.appendOID( "_id", &oid );
- b.appendElements( o );
- _collection->insertDocument( &_txn, b.obj(), false );
- }
- else {
- _collection->insertDocument( &_txn, o, false );
+ _collection = _database->getCollection(ns());
+ if (_collection) {
+ _database->dropCollection(&_txn, ns());
}
+ _collection = _database->createCollection(&_txn, ns());
wunit.commit();
}
-
- OperationContextImpl _txn;
- ScopedTransaction _scopedXact;
- Lock::DBLock _lk;
-
- Client::Context _context;
-
- Database* _database;
- Collection* _collection;
-
- DBDirectClient _client;
- };
-
- class Basic : public Base {
- public:
- void run() {
- insert("{\"a\":\"b\"}");
- insert("{\"c\":\"d\"}");
- ASSERT_EQUALS(2ULL, _client.count(ns(), fromjson("{}")));
- }
- };
-
- class Query : public Base {
- public:
- void run() {
- insert( "{\"a\":\"b\"}" );
- insert( "{\"a\":\"b\",\"x\":\"y\"}" );
- insert( "{\"a\":\"c\"}" );
- ASSERT_EQUALS(2ULL, _client.count(ns(), fromjson("{\"a\":\"b\"}")));
- }
- };
-
- class QueryFields : public Base {
- public:
- void run() {
- insert( "{\"a\":\"b\"}" );
- insert( "{\"a\":\"c\"}" );
- insert( "{\"d\":\"e\"}" );
- ASSERT_EQUALS(1ULL, _client.count(ns(), fromjson("{\"a\":\"b\"}")));
- }
- };
-
- class IndexedRegex : public Base {
- public:
- void run() {
- insert( "{\"a\":\"c\"}" );
- insert( "{\"a\":\"b\"}" );
- insert( "{\"a\":\"d\"}" );
- ASSERT_EQUALS(1ULL, _client.count(ns(), fromjson("{\"a\":/^b/}")));
- }
- };
-
-
- class All : public Suite {
- public:
- All() : Suite( "count" ) {
-
+ addIndex(fromjson("{\"a\":1}"));
+ }
+ ~Base() {
+ try {
+ WriteUnitOfWork wunit(&_txn);
+ uassertStatusOK(_database->dropCollection(&_txn, ns()));
+ wunit.commit();
+ } catch (...) {
+ FAIL("Exception while cleaning up collection");
}
-
- void setupTests() {
- add<Basic>();
- add<Query>();
- add<QueryFields>();
- add<IndexedRegex>();
+ }
+
+protected:
+ static const char* ns() {
+ return "unittests.counttests";
+ }
+
+ void addIndex(const BSONObj& key) {
+ Helpers::ensureIndex(&_txn,
+ _collection,
+ key,
+ /*unique=*/false,
+ /*name=*/key.firstElementFieldName());
+ }
+
+ void insert(const char* s) {
+ WriteUnitOfWork wunit(&_txn);
+ const BSONObj o = fromjson(s);
+
+ if (o["_id"].eoo()) {
+ BSONObjBuilder b;
+ OID oid;
+ oid.init();
+ b.appendOID("_id", &oid);
+ b.appendElements(o);
+ _collection->insertDocument(&_txn, b.obj(), false);
+ } else {
+ _collection->insertDocument(&_txn, o, false);
}
- };
-
- SuiteInstance<All> myall;
-
-} // namespace CountTests
+ wunit.commit();
+ }
+
+
+ OperationContextImpl _txn;
+ ScopedTransaction _scopedXact;
+ Lock::DBLock _lk;
+
+ Client::Context _context;
+
+ Database* _database;
+ Collection* _collection;
+
+ DBDirectClient _client;
+};
+
+class Basic : public Base {
+public:
+ void run() {
+ insert("{\"a\":\"b\"}");
+ insert("{\"c\":\"d\"}");
+ ASSERT_EQUALS(2ULL, _client.count(ns(), fromjson("{}")));
+ }
+};
+
+class Query : public Base {
+public:
+ void run() {
+ insert("{\"a\":\"b\"}");
+ insert("{\"a\":\"b\",\"x\":\"y\"}");
+ insert("{\"a\":\"c\"}");
+ ASSERT_EQUALS(2ULL, _client.count(ns(), fromjson("{\"a\":\"b\"}")));
+ }
+};
+
+class QueryFields : public Base {
+public:
+ void run() {
+ insert("{\"a\":\"b\"}");
+ insert("{\"a\":\"c\"}");
+ insert("{\"d\":\"e\"}");
+ ASSERT_EQUALS(1ULL, _client.count(ns(), fromjson("{\"a\":\"b\"}")));
+ }
+};
+
+class IndexedRegex : public Base {
+public:
+ void run() {
+ insert("{\"a\":\"c\"}");
+ insert("{\"a\":\"b\"}");
+ insert("{\"a\":\"d\"}");
+ ASSERT_EQUALS(1ULL, _client.count(ns(), fromjson("{\"a\":/^b/}")));
+ }
+};
+
+
+class All : public Suite {
+public:
+ All() : Suite("count") {}
+
+ void setupTests() {
+ add<Basic>();
+ add<Query>();
+ add<QueryFields>();
+ add<IndexedRegex>();
+ }
+};
+
+SuiteInstance<All> myall;
+
+} // namespace CountTests
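
The fromjson() predicates above can equally be built with the BSON macro; the Query case, for instance, is equivalent to (sketch):

    // Same predicate as fromjson("{\"a\":\"b\"}"), without JSON parsing.
    BSONObj query = BSON("a" << "b");
    ASSERT_EQUALS(2ULL, _client.count(ns(), query));

(The regex case stays in JSON form, where the /^b/ literal is most readable.)
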
diff --git a/src/mongo/dbtests/dbclient_multi_command_test.cpp b/src/mongo/dbtests/dbclient_multi_command_test.cpp
index c5d60b24e4e..0a09bce0dd0 100644
--- a/src/mongo/dbtests/dbclient_multi_command_test.cpp
+++ b/src/mongo/dbtests/dbclient_multi_command_test.cpp
@@ -32,11 +32,11 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- TEST(DBClientMultiCommandTest, Basic) {
- DBClientMultiCommand();
- ASSERT( true );
- }
+TEST(DBClientMultiCommandTest, Basic) {
+ DBClientMultiCommand();
+ ASSERT(true);
+}
-} // end namespace
+} // end namespace
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 913618d7b29..218f685e33c 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -38,224 +38,194 @@
namespace mongo {
- using std::auto_ptr;
- using std::set;
+using std::auto_ptr;
+using std::set;
- /**
- * Unit tests related to DBHelpers
- */
+/**
+ * Unit tests related to DBHelpers
+ */
- static const char * const ns = "unittests.removetests";
+static const char* const ns = "unittests.removetests";
- // TODO: Normalize with test framework
- /** Simple test for Helpers::RemoveRange. */
- class RemoveRange {
- public:
- RemoveRange() :
- _min( 4 ), _max( 8 )
- {
- }
+// TODO: Normalize with test framework
+/** Simple test for Helpers::RemoveRange. */
+class RemoveRange {
+public:
+ RemoveRange() : _min(4), _max(8) {}
- void run() {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- for ( int i = 0; i < 10; ++i ) {
- client.insert( ns, BSON( "_id" << i ) );
- }
-
- {
- // Remove _id range [_min, _max).
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_X);
- Client::Context ctx(&txn, ns );
-
- KeyRange range( ns,
- BSON( "_id" << _min ),
- BSON( "_id" << _max ),
- BSON( "_id" << 1 ) );
- mongo::WriteConcernOptions dummyWriteConcern;
- Helpers::removeRange(&txn, range, false, dummyWriteConcern);
- }
-
- // Check that the expected documents remain.
- ASSERT_EQUALS( expected(), docs(&txn) );
+ void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ for (int i = 0; i < 10; ++i) {
+ client.insert(ns, BSON("_id" << i));
}
- private:
- BSONArray expected() const {
- BSONArrayBuilder bab;
- for ( int i = 0; i < _min; ++i ) {
- bab << BSON( "_id" << i );
- }
- for ( int i = _max; i < 10; ++i ) {
- bab << BSON( "_id" << i );
- }
- return bab.arr();
+
+ {
+ // Remove _id range [_min, _max).
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ Client::Context ctx(&txn, ns);
+
+ KeyRange range(ns, BSON("_id" << _min), BSON("_id" << _max), BSON("_id" << 1));
+ mongo::WriteConcernOptions dummyWriteConcern;
+ Helpers::removeRange(&txn, range, false, dummyWriteConcern);
}
- BSONArray docs(OperationContext* txn) const {
- DBDirectClient client(txn);
- auto_ptr<DBClientCursor> cursor = client.query( ns,
- Query().hint( BSON( "_id" << 1 ) ) );
- BSONArrayBuilder bab;
- while ( cursor->more() ) {
- bab << cursor->next();
- }
- return bab.arr();
+ // Check that the expected documents remain.
+ ASSERT_EQUALS(expected(), docs(&txn));
+ }
+
+private:
+ BSONArray expected() const {
+ BSONArrayBuilder bab;
+ for (int i = 0; i < _min; ++i) {
+ bab << BSON("_id" << i);
}
- int _min;
- int _max;
- };
-
- class All: public Suite {
- public:
- All() :
- Suite( "remove" )
- {
+ for (int i = _max; i < 10; ++i) {
+ bab << BSON("_id" << i);
}
- void setupTests() {
- add<RemoveRange>();
+ return bab.arr();
+ }
+
+ BSONArray docs(OperationContext* txn) const {
+ DBDirectClient client(txn);
+ auto_ptr<DBClientCursor> cursor = client.query(ns, Query().hint(BSON("_id" << 1)));
+ BSONArrayBuilder bab;
+ while (cursor->more()) {
+ bab << cursor->next();
}
- } myall;
+ return bab.arr();
+ }
+ int _min;
+ int _max;
+};
+
+class All : public Suite {
+public:
+ All() : Suite("remove") {}
+ void setupTests() {
+ add<RemoveRange>();
+ }
+} myall;
- //
- // Tests getting disk locs for an index range
- //
+//
+// Tests getting disk locs for an index range
+//
- TEST(DBHelperTests, FindDiskLocs) {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
+TEST(DBHelperTests, FindDiskLocs) {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
- // Some unique tag we can use to make sure we're pulling back the right data
- OID tag = OID::gen();
- client.remove( ns, BSONObj() );
+ // Some unique tag we can use to make sure we're pulling back the right data
+ OID tag = OID::gen();
+ client.remove(ns, BSONObj());
- int numDocsInserted = 10;
- for ( int i = 0; i < numDocsInserted; ++i ) {
- client.insert( ns, BSON( "_id" << i << "tag" << tag ) );
- }
+ int numDocsInserted = 10;
+ for (int i = 0; i < numDocsInserted; ++i) {
+ client.insert(ns, BSON("_id" << i << "tag" << tag));
+ }
- long long maxSizeBytes = 1024 * 1024 * 1024;
+ long long maxSizeBytes = 1024 * 1024 * 1024;
- set<RecordId> locs;
- long long numDocsFound;
- long long estSizeBytes;
- {
- // search _id range (0, 10)
- ScopedTransaction transaction(&txn, MODE_IS);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_S);
-
- KeyRange range( ns,
- BSON( "_id" << 0 ),
- BSON( "_id" << numDocsInserted ),
- BSON( "_id" << 1 ) );
-
- Status result = Helpers::getLocsInRange( &txn,
- range,
- maxSizeBytes,
- &locs,
- &numDocsFound,
- &estSizeBytes );
-
- ASSERT_EQUALS( result, Status::OK() );
- ASSERT_EQUALS( numDocsFound, numDocsInserted );
- ASSERT_NOT_EQUALS( estSizeBytes, 0 );
- ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes );
-
- Database* db = dbHolder().get( &txn, nsToDatabase(range.ns) );
- const Collection* collection = db->getCollection(ns);
-
- // Make sure all the disklocs actually correspond to the right info
- for ( set<RecordId>::const_iterator it = locs.begin(); it != locs.end(); ++it ) {
- const BSONObj obj = collection->docFor(&txn, *it).value();
- ASSERT_EQUALS(obj["tag"].OID(), tag);
- }
+ set<RecordId> locs;
+ long long numDocsFound;
+ long long estSizeBytes;
+ {
+ // search _id range (0, 10)
+ ScopedTransaction transaction(&txn, MODE_IS);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_S);
+
+ KeyRange range(ns, BSON("_id" << 0), BSON("_id" << numDocsInserted), BSON("_id" << 1));
+
+ Status result =
+ Helpers::getLocsInRange(&txn, range, maxSizeBytes, &locs, &numDocsFound, &estSizeBytes);
+
+ ASSERT_EQUALS(result, Status::OK());
+ ASSERT_EQUALS(numDocsFound, numDocsInserted);
+ ASSERT_NOT_EQUALS(estSizeBytes, 0);
+ ASSERT_LESS_THAN(estSizeBytes, maxSizeBytes);
+
+ Database* db = dbHolder().get(&txn, nsToDatabase(range.ns));
+ const Collection* collection = db->getCollection(ns);
+
+ // Make sure all the disklocs actually correspond to the right info
+ for (set<RecordId>::const_iterator it = locs.begin(); it != locs.end(); ++it) {
+ const BSONObj obj = collection->docFor(&txn, *it).value();
+ ASSERT_EQUALS(obj["tag"].OID(), tag);
}
}
+}
- //
- // Tests index not found error getting disk locs
- //
+//
+// Tests index not found error getting disk locs
+//
- TEST(DBHelperTests, FindDiskLocsNoIndex) {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
+TEST(DBHelperTests, FindDiskLocsNoIndex) {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
- client.remove( ns, BSONObj() );
- client.insert( ns, BSON( "_id" << OID::gen() ) );
+ client.remove(ns, BSONObj());
+ client.insert(ns, BSON("_id" << OID::gen()));
- long long maxSizeBytes = 1024 * 1024 * 1024;
+ long long maxSizeBytes = 1024 * 1024 * 1024;
- set<RecordId> locs;
- long long numDocsFound;
- long long estSizeBytes;
- {
- ScopedTransaction transaction(&txn, MODE_IS);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_S);
-
- // search invalid index range
- KeyRange range( ns,
- BSON( "badIndex" << 0 ),
- BSON( "badIndex" << 10 ),
- BSON( "badIndex" << 1 ) );
-
- Status result = Helpers::getLocsInRange( &txn,
- range,
- maxSizeBytes,
- &locs,
- &numDocsFound,
- &estSizeBytes );
-
- // Make sure we get the right error code
- ASSERT_EQUALS( result.code(), ErrorCodes::IndexNotFound );
- ASSERT_EQUALS( static_cast<long long>( locs.size() ), 0 );
- ASSERT_EQUALS( numDocsFound, 0 );
- ASSERT_EQUALS( estSizeBytes, 0 );
- }
+ set<RecordId> locs;
+ long long numDocsFound;
+ long long estSizeBytes;
+ {
+ ScopedTransaction transaction(&txn, MODE_IS);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_S);
+
+ // search invalid index range
+ KeyRange range(ns, BSON("badIndex" << 0), BSON("badIndex" << 10), BSON("badIndex" << 1));
+
+ Status result =
+ Helpers::getLocsInRange(&txn, range, maxSizeBytes, &locs, &numDocsFound, &estSizeBytes);
+
+ // Make sure we get the right error code
+ ASSERT_EQUALS(result.code(), ErrorCodes::IndexNotFound);
+ ASSERT_EQUALS(static_cast<long long>(locs.size()), 0);
+ ASSERT_EQUALS(numDocsFound, 0);
+ ASSERT_EQUALS(estSizeBytes, 0);
}
+}
- //
- // Tests chunk too big error getting disk locs
- //
+//
+// Tests chunk too big error getting disk locs
+//
- TEST(DBHelperTests, FindDiskLocsTooBig) {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
+TEST(DBHelperTests, FindDiskLocsTooBig) {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
- client.remove( ns, BSONObj() );
+ client.remove(ns, BSONObj());
- int numDocsInserted = 10;
- for ( int i = 0; i < numDocsInserted; ++i ) {
- client.insert( ns, BSON( "_id" << i ) );
- }
+ int numDocsInserted = 10;
+ for (int i = 0; i < numDocsInserted; ++i) {
+ client.insert(ns, BSON("_id" << i));
+ }
- // Very small max size
- long long maxSizeBytes = 10;
+ // Very small max size
+ long long maxSizeBytes = 10;
- set<RecordId> locs;
- long long numDocsFound;
- long long estSizeBytes;
- {
- ScopedTransaction transaction(&txn, MODE_IS);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_S);
-
- KeyRange range( ns,
- BSON( "_id" << 0 ),
- BSON( "_id" << numDocsInserted ),
- BSON( "_id" << 1 ) );
-
- Status result = Helpers::getLocsInRange( &txn,
- range,
- maxSizeBytes,
- &locs,
- &numDocsFound,
- &estSizeBytes );
-
- // Make sure we get the right error code and our count and size estimates are valid
- ASSERT_EQUALS( result.code(), ErrorCodes::InvalidLength );
- ASSERT_EQUALS( numDocsFound, numDocsInserted );
- ASSERT_GREATER_THAN( estSizeBytes, maxSizeBytes );
- }
+ set<RecordId> locs;
+ long long numDocsFound;
+ long long estSizeBytes;
+ {
+ ScopedTransaction transaction(&txn, MODE_IS);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_S);
+
+ KeyRange range(ns, BSON("_id" << 0), BSON("_id" << numDocsInserted), BSON("_id" << 1));
+
+ Status result =
+ Helpers::getLocsInRange(&txn, range, maxSizeBytes, &locs, &numDocsFound, &estSizeBytes);
+
+ // Make sure we get the right error code and our count and size estimates are valid
+ ASSERT_EQUALS(result.code(), ErrorCodes::InvalidLength);
+ ASSERT_EQUALS(numDocsFound, numDocsInserted);
+ ASSERT_GREATER_THAN(estSizeBytes, maxSizeBytes);
}
+}
-} // namespace RemoveTests
+} // namespace RemoveTests
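
Worth calling out from RemoveRange: the expected() helper shows the range behaves as half-open, [min, max). With this file's values, a sketch (reading the bool argument as a max-inclusivity flag is an inference from the surviving documents, not from a documented signature):

    // Deletes _id 4..7; _id 0..3 and 8..9 survive, per expected().
    KeyRange range(ns, BSON("_id" << 4), BSON("_id" << 8), BSON("_id" << 1));
    mongo::WriteConcernOptions dummyWriteConcern;
    Helpers::removeRange(&txn, range, false /* max exclusive (assumed) */, dummyWriteConcern);
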
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
index 6e419a6cc4a..3fbe9fafcbf 100644
--- a/src/mongo/dbtests/dbtests.cpp
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -52,56 +52,50 @@
namespace mongo {
namespace dbtests {
- // This specifies default dbpath for our testing framework
- const std::string default_test_dbpath = "/tmp/unittest";
+// This specifies default dbpath for our testing framework
+const std::string default_test_dbpath = "/tmp/unittest";
- Status createIndex(OperationContext* txn,
- const StringData &ns,
- const BSONObj& keys,
- bool unique) {
- BSONObjBuilder specBuilder;
- specBuilder <<
- "name" << DBClientBase::genIndexName(keys) <<
- "ns" << ns <<
- "key" << keys;
- if (unique) {
- specBuilder << "unique" << true;
- }
- return createIndexFromSpec(txn, ns, specBuilder.done());
+Status createIndex(OperationContext* txn, const StringData& ns, const BSONObj& keys, bool unique) {
+ BSONObjBuilder specBuilder;
+ specBuilder << "name" << DBClientBase::genIndexName(keys) << "ns" << ns << "key" << keys;
+ if (unique) {
+ specBuilder << "unique" << true;
}
+ return createIndexFromSpec(txn, ns, specBuilder.done());
+}
- Status createIndexFromSpec(OperationContext* txn, const StringData& ns, const BSONObj& spec) {
- AutoGetOrCreateDb autoDb(txn, nsToDatabaseSubstring(ns), MODE_X);
- Collection* coll;
- {
- WriteUnitOfWork wunit(txn);
- coll = autoDb.getDb()->getOrCreateCollection(txn, ns);
- invariant(coll);
- wunit.commit();
- }
- MultiIndexBlock indexer(txn, coll);
- Status status = indexer.init(spec);
- if (status == ErrorCodes::IndexAlreadyExists) {
- return Status::OK();
- }
- if (!status.isOK()) {
- return status;
- }
- status = indexer.insertAllDocumentsInCollection();
- if (!status.isOK()) {
- return status;
- }
+Status createIndexFromSpec(OperationContext* txn, const StringData& ns, const BSONObj& spec) {
+ AutoGetOrCreateDb autoDb(txn, nsToDatabaseSubstring(ns), MODE_X);
+ Collection* coll;
+ {
WriteUnitOfWork wunit(txn);
- indexer.commit();
+ coll = autoDb.getDb()->getOrCreateCollection(txn, ns);
+ invariant(coll);
wunit.commit();
+ }
+ MultiIndexBlock indexer(txn, coll);
+ Status status = indexer.init(spec);
+ if (status == ErrorCodes::IndexAlreadyExists) {
return Status::OK();
}
+ if (!status.isOK()) {
+ return status;
+ }
+ status = indexer.insertAllDocumentsInCollection();
+ if (!status.isOK()) {
+ return status;
+ }
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
+ return Status::OK();
+}
} // namespace dbtests
-} // namespace mongo
+} // namespace mongo
-int dbtestsMain( int argc, char** argv, char** envp ) {
+int dbtestsMain(int argc, char** argv, char** envp) {
static StaticObserver StaticObserver;
::mongo::setupSynchronousSignalHandlers();
setGlobalEnvironment(new GlobalEnvironmentMongoD());
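
To make createIndex() above concrete: for keys { a: 1 } on an illustrative namespace, the spec it hands to createIndexFromSpec() would look roughly like this; genIndexName() joins field names with directions, yielding "a_1" here (sketch):

    BSONObj spec = BSON("name" << "a_1"                 // DBClientBase::genIndexName(keys)
                        << "ns" << "unittests.example"  // illustrative namespace
                        << "key" << BSON("a" << 1)
                        << "unique" << true);           // appended only when unique
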
diff --git a/src/mongo/dbtests/dbtests.h b/src/mongo/dbtests/dbtests.h
index 159466f82b0..d61546c713f 100644
--- a/src/mongo/dbtests/dbtests.h
+++ b/src/mongo/dbtests/dbtests.h
@@ -41,26 +41,26 @@ using namespace mongo::unittest;
using boost::shared_ptr;
namespace mongo {
- class BSONObj;
- class OperationContext;
- class Status;
- class StringData;
+class BSONObj;
+class OperationContext;
+class Status;
+class StringData;
namespace dbtests {
- // This specifies default dbpath for our testing framework
- extern const std::string default_test_dbpath;
+// This specifies default dbpath for our testing framework
+extern const std::string default_test_dbpath;
- /**
- * Creates an index if it does not already exist.
- */
- Status createIndex(OperationContext* txn,
- const StringData &ns,
- const BSONObj& keys,
- bool unique = false);
+/**
+ * Creates an index if it does not already exist.
+ */
+Status createIndex(OperationContext* txn,
+ const StringData& ns,
+ const BSONObj& keys,
+ bool unique = false);
- /**
- * Creates an index from a BSON spec, if it does not already exist.
- */
- Status createIndexFromSpec(OperationContext* txn, const StringData& ns, const BSONObj& spec);
+/**
+ * Creates an index from a BSON spec, if it does not already exist.
+ */
+Status createIndexFromSpec(OperationContext* txn, const StringData& ns, const BSONObj& spec);
} // namespace dbtests
} // namespace mongo
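
A typical call site for these declarations, with an illustrative namespace (sketch):

    OperationContextImpl txn;
    ASSERT_OK(dbtests::createIndex(&txn, "unittests.example", BSON("a" << 1)));
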
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index 822db3b58f4..201f415d472 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -43,163 +43,165 @@
namespace DirectClientTests {
- using std::auto_ptr;
- using std::vector;
-
- class ClientBase {
- public:
- ClientBase() {
- _prevError = mongo::lastError._get( false );
- mongo::lastError.release();
- mongo::lastError.reset( new LastError() );
- }
- virtual ~ClientBase() {
- mongo::lastError.reset( _prevError );
- }
- private:
- LastError* _prevError;
- };
-
- const char *ns = "a.b";
-
- class Capped : public ClientBase {
- public:
- virtual void run() {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
- for( int pass=0; pass < 3; pass++ ) {
- client.createCollection(ns, 1024 * 1024, true, 999);
- for( int j =0; j < pass*3; j++ )
- client.insert(ns, BSON("x" << j));
-
- // test truncation of a capped collection
- if( pass ) {
- BSONObj info;
- BSONObj cmd = BSON( "captrunc" << "b" << "n" << 1 << "inc" << true );
- //cout << cmd.toString() << endl;
- bool ok = client.runCommand("a", cmd, info);
- //cout << info.toString() << endl;
- verify(ok);
- }
-
- verify( client.dropCollection(ns) );
+using std::auto_ptr;
+using std::vector;
+
+class ClientBase {
+public:
+ ClientBase() {
+ _prevError = mongo::lastError._get(false);
+ mongo::lastError.release();
+ mongo::lastError.reset(new LastError());
+ }
+ virtual ~ClientBase() {
+ mongo::lastError.reset(_prevError);
+ }
+
+private:
+ LastError* _prevError;
+};
+
+const char* ns = "a.b";
+
+class Capped : public ClientBase {
+public:
+ virtual void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+ for (int pass = 0; pass < 3; pass++) {
+ client.createCollection(ns, 1024 * 1024, true, 999);
+ for (int j = 0; j < pass * 3; j++)
+ client.insert(ns, BSON("x" << j));
+
+ // test truncation of a capped collection
+ if (pass) {
+ BSONObj info;
+ BSONObj cmd = BSON("captrunc"
+ << "b"
+ << "n" << 1 << "inc" << true);
+ // cout << cmd.toString() << endl;
+ bool ok = client.runCommand("a", cmd, info);
+ // cout << info.toString() << endl;
+ verify(ok);
}
- }
- };
-
- class InsertMany : ClientBase {
- public:
- virtual void run(){
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- vector<BSONObj> objs;
- objs.push_back(BSON("_id" << 1));
- objs.push_back(BSON("_id" << 1));
- objs.push_back(BSON("_id" << 2));
-
-
- client.dropCollection(ns);
- client.insert(ns, objs);
- ASSERT_EQUALS(client.getLastErrorDetailed()["code"].numberInt(), 11000);
- ASSERT_EQUALS((int)client.count(ns), 1);
-
- client.dropCollection(ns);
- client.insert(ns, objs, InsertOption_ContinueOnError);
- ASSERT_EQUALS(client.getLastErrorDetailed()["code"].numberInt(), 11000);
- ASSERT_EQUALS((int)client.count(ns), 2);
- }
-
- };
-
- class BadNSCmd : ClientBase {
- public:
- virtual void run(){
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- BSONObj result;
- BSONObj cmdObj = BSON( "count" << "" );
- ASSERT_THROWS( client.runCommand( "", cmdObj, result ), UserException );
- }
- };
-
- class BadNSQuery : ClientBase {
- public:
- virtual void run(){
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- auto_ptr<DBClientCursor> cursor = client.query( "", Query(), 1 );
- ASSERT(cursor->more());
- BSONObj result = cursor->next().getOwned();
- ASSERT( result.hasField( "$err" ));
- ASSERT_EQUALS(result["code"].Int(), 16256);
- }
- };
-
- class BadNSGetMore : ClientBase {
- public:
- virtual void run(){
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- auto_ptr<DBClientCursor> cursor = client.getMore("", 1, 1);
- ASSERT(cursor->more());
- BSONObj result = cursor->next().getOwned();
- ASSERT(result.hasField("$err"));
- ASSERT_EQUALS(result["code"].Int(), 16258);
- }
- };
-
- class BadNSInsert : ClientBase {
- public:
- virtual void run(){
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- client.insert( "", BSONObj(), 0 );
- ASSERT( !client.getLastError().empty() );
- }
- };
- class BadNSUpdate : ClientBase {
- public:
- virtual void run(){
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- client.update( "", Query(), BSON( "$set" << BSON( "x" << 1 )) );
- ASSERT( !client.getLastError().empty() );
- }
- };
-
- class BadNSRemove : ClientBase {
- public:
- virtual void run(){
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- client.remove( "", Query() );
- ASSERT( !client.getLastError().empty() );
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "directclient" ) {
- }
- void setupTests() {
- add< Capped >();
- add< InsertMany >();
- add< BadNSCmd >();
- add< BadNSQuery >();
- add< BadNSGetMore >();
- add< BadNSInsert >();
- add< BadNSUpdate >();
- add< BadNSRemove >();
+ verify(client.dropCollection(ns));
}
- };
-
- SuiteInstance<All> myall;
+ }
+};
+
+class InsertMany : ClientBase {
+public:
+ virtual void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ vector<BSONObj> objs;
+ objs.push_back(BSON("_id" << 1));
+ objs.push_back(BSON("_id" << 1));
+ objs.push_back(BSON("_id" << 2));
+
+
+ client.dropCollection(ns);
+ client.insert(ns, objs);
+ ASSERT_EQUALS(client.getLastErrorDetailed()["code"].numberInt(), 11000);
+ ASSERT_EQUALS((int)client.count(ns), 1);
+
+ client.dropCollection(ns);
+ client.insert(ns, objs, InsertOption_ContinueOnError);
+ ASSERT_EQUALS(client.getLastErrorDetailed()["code"].numberInt(), 11000);
+ ASSERT_EQUALS((int)client.count(ns), 2);
+ }
+};
+
+class BadNSCmd : ClientBase {
+public:
+ virtual void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ BSONObj result;
+ BSONObj cmdObj = BSON("count"
+ << "");
+ ASSERT_THROWS(client.runCommand("", cmdObj, result), UserException);
+ }
+};
+
+class BadNSQuery : ClientBase {
+public:
+ virtual void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ auto_ptr<DBClientCursor> cursor = client.query("", Query(), 1);
+ ASSERT(cursor->more());
+ BSONObj result = cursor->next().getOwned();
+ ASSERT(result.hasField("$err"));
+ ASSERT_EQUALS(result["code"].Int(), 16256);
+ }
+};
+
+class BadNSGetMore : ClientBase {
+public:
+ virtual void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ auto_ptr<DBClientCursor> cursor = client.getMore("", 1, 1);
+ ASSERT(cursor->more());
+ BSONObj result = cursor->next().getOwned();
+ ASSERT(result.hasField("$err"));
+ ASSERT_EQUALS(result["code"].Int(), 16258);
+ }
+};
+
+class BadNSInsert : ClientBase {
+public:
+ virtual void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.insert("", BSONObj(), 0);
+ ASSERT(!client.getLastError().empty());
+ }
+};
+
+class BadNSUpdate : ClientBase {
+public:
+ virtual void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.update("", Query(), BSON("$set" << BSON("x" << 1)));
+ ASSERT(!client.getLastError().empty());
+ }
+};
+
+class BadNSRemove : ClientBase {
+public:
+ virtual void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.remove("", Query());
+ ASSERT(!client.getLastError().empty());
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("directclient") {}
+ void setupTests() {
+ add<Capped>();
+ add<InsertMany>();
+ add<BadNSCmd>();
+ add<BadNSQuery>();
+ add<BadNSGetMore>();
+ add<BadNSInsert>();
+ add<BadNSUpdate>();
+ add<BadNSRemove>();
+ }
+};
+
+SuiteInstance<All> myall;
}
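
One detail worth spelling out from Capped::run(): the truncation command it assembles. Since ns is "a.b", the command targets collection "b" in database "a" (sketch; the n/inc semantics are as exercised by the test, not documented here):

    // { captrunc: "b", n: 1, inc: true }
    BSONObj cmd = BSON("captrunc" << "b" << "n" << 1 << "inc" << true);
    BSONObj info;
    bool ok = client.runCommand("a", cmd, info);  // client is the DBDirectClient above
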
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index fc15e8937e6..caf647802af 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -46,1956 +46,2088 @@
namespace DocumentSourceTests {
- using boost::intrusive_ptr;
- using boost::shared_ptr;
- using std::map;
- using std::set;
- using std::string;
- using std::vector;
+using boost::intrusive_ptr;
+using boost::shared_ptr;
+using std::map;
+using std::set;
+using std::string;
+using std::vector;
+
+static const char* const ns = "unittests.documentsourcetests";
+static const BSONObj metaTextScore = BSON("$meta"
+ << "textScore");
+
+BSONObj toBson(const intrusive_ptr<DocumentSource>& source) {
+ vector<Value> arr;
+ source->serializeToArray(arr);
+ ASSERT_EQUALS(arr.size(), 1UL);
+ return arr[0].getDocument().toBson();
+}
+
+class CollectionBase {
+public:
+ CollectionBase() : client(&_opCtx) {}
+
+ ~CollectionBase() {
+ client.dropCollection(ns);
+ }
+
+protected:
+ OperationContextImpl _opCtx;
+ DBDirectClient client;
+};
+
+namespace DocumentSourceClass {
+using mongo::DocumentSource;
+
+template <size_t ArrayLen>
+set<string> arrayToSet(const char*(&array)[ArrayLen]) {
+ set<string> out;
+ for (size_t i = 0; i < ArrayLen; i++)
+ out.insert(array[i]);
+ return out;
+}
+
+class Deps {
+public:
+ void run() {
+ {
+ const char* array[] = {"a", "b"}; // basic
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "b" << 1 << "_id" << 0));
+ }
+ {
+ const char* array[] = {"a", "ab"}; // prefixed but not subfield
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "ab" << 1 << "_id" << 0));
+ }
+ {
+ const char* array[] = {"a", "b", "a.b"}; // a.b included by a
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "b" << 1 << "_id" << 0));
+ }
+ {
+ const char* array[] = {"a", "_id"}; // _id now included
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
+ }
+ {
+ const char* array[] = {"a", "_id.a"}; // still include whole _id (SERVER-7502)
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
+ }
+ {
+ const char* array[] = {"a", "_id", "_id.a"}; // handle both _id and subfield
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
+ }
+ {
+ const char* array[] = {"a", "_id", "_id_a"}; // _id prefixed but non-subfield
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ ASSERT_EQUALS(deps.toProjection(), BSON("_id_a" << 1 << "a" << 1 << "_id" << 1));
+ }
+ {
+ const char* array[] = {"a"}; // fields ignored with needWholeDocument
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ deps.needWholeDocument = true;
+ ASSERT_EQUALS(deps.toProjection(), BSONObj());
+ }
+ {
+ const char* array[] = {"a"}; // needTextScore with needWholeDocument
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ deps.needWholeDocument = true;
+ deps.needTextScore = true;
+ ASSERT_EQUALS(deps.toProjection(), BSON(Document::metaFieldTextScore << metaTextScore));
+ }
+ {
+ const char* array[] = {"a"}; // needTextScore without needWholeDocument
+ DepsTracker deps;
+ deps.fields = arrayToSet(array);
+ deps.needTextScore = true;
+ ASSERT_EQUALS(
+ deps.toProjection(),
+ BSON(Document::metaFieldTextScore << metaTextScore << "a" << 1 << "_id" << 0));
+ }
+ }
+};
+}
- static const char* const ns = "unittests.documentsourcetests";
- static const BSONObj metaTextScore = BSON("$meta" << "textScore");
+namespace DocumentSourceCursor {
- BSONObj toBson( const intrusive_ptr<DocumentSource>& source ) {
- vector<Value> arr;
- source->serializeToArray(arr);
- ASSERT_EQUALS(arr.size(), 1UL);
- return arr[0].getDocument().toBson();
+using mongo::DocumentSourceCursor;
+
+class Base : public CollectionBase {
+public:
+ Base() : _ctx(new ExpressionContext(&_opCtx, NamespaceString(ns))) {
+ _ctx->tempDir = storageGlobalParams.dbpath + "/_tmp";
}
- class CollectionBase {
- public:
- CollectionBase() : client(&_opCtx) {
+protected:
+ void createSource() {
+ // clean up first if this was called before
+ _source.reset();
+ _exec.reset();
- }
+ Client::WriteContext ctx(&_opCtx, ns);
+ CanonicalQuery* cq;
+ uassertStatusOK(CanonicalQuery::canonicalize(ns, /*query=*/BSONObj(), &cq));
+ PlanExecutor* execBare;
+ uassertStatusOK(
+ getExecutor(&_opCtx, ctx.getCollection(), cq, PlanExecutor::YIELD_MANUAL, &execBare));
+
+ _exec.reset(execBare);
+ _exec->saveState();
+ _exec->registerExec();
+
+ _source = DocumentSourceCursor::create(ns, _exec, _ctx);
+ }
+ intrusive_ptr<ExpressionContext> ctx() {
+ return _ctx;
+ }
+ DocumentSourceCursor* source() {
+ return _source.get();
+ }
- ~CollectionBase() {
- client.dropCollection( ns );
+private:
+ // It is important that these are ordered to ensure correct destruction order.
+ boost::shared_ptr<PlanExecutor> _exec;
+ intrusive_ptr<ExpressionContext> _ctx;
+ intrusive_ptr<DocumentSourceCursor> _source;
+};
+
+/** Create a DocumentSourceCursor. */
+class Empty : public Base {
+public:
+ void run() {
+ createSource();
+ // The DocumentSourceCursor doesn't hold a read lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ // The collection is empty, so the source produces no results.
+ ASSERT(!source()->getNext());
+ // Exhausting the source releases the read lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ }
+};
+
+/** Iterate a DocumentSourceCursor. */
+class Iterate : public Base {
+public:
+ void run() {
+ client.insert(ns, BSON("a" << 1));
+ createSource();
+ // The DocumentSourceCursor doesn't hold a read lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ // The cursor will produce the expected result.
+ boost::optional<Document> next = source()->getNext();
+ ASSERT(bool(next));
+ ASSERT_EQUALS(Value(1), next->getField("a"));
+ // There are no more results.
+ ASSERT(!source()->getNext());
+ // Exhausting the source releases the read lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ }
+};
+
+/** Dispose of a DocumentSourceCursor. */
+class Dispose : public Base {
+public:
+ void run() {
+ createSource();
+ // The DocumentSourceCursor doesn't hold a read lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ source()->dispose();
+ // Releasing the cursor releases the read lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ // The source is marked as exhausted.
+ ASSERT(!source()->getNext());
+ }
+};
+
+/** Iterate a DocumentSourceCursor and then dispose of it. */
+class IterateDispose : public Base {
+public:
+ void run() {
+ client.insert(ns, BSON("a" << 1));
+ client.insert(ns, BSON("a" << 2));
+ client.insert(ns, BSON("a" << 3));
+ createSource();
+ // The result is as expected.
+ boost::optional<Document> next = source()->getNext();
+ ASSERT(bool(next));
+ ASSERT_EQUALS(Value(1), next->getField("a"));
+ // The next result is as expected.
+ next = source()->getNext();
+ ASSERT(bool(next));
+ ASSERT_EQUALS(Value(2), next->getField("a"));
+ // The DocumentSourceCursor doesn't hold a read lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ source()->dispose();
+ // Disposing of the source releases the lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ // The source cannot be advanced further.
+ ASSERT(!source()->getNext());
+ }
+};
+
+/** Set a value or await an expected value. */
+class PendingValue {
+public:
+ PendingValue(int initialValue)
+ : _value(initialValue), _mutex("DocumentSourceTests::PendingValue::_mutex") {}
+ void set(int newValue) {
+ scoped_lock lk(_mutex);
+ _value = newValue;
+ _condition.notify_all();
+ }
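+ // await() loops because condition variables may wake spuriously; the wait
+ // ends only once _value matches expectedValue.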
+ void await(int expectedValue) const {
+ scoped_lock lk(_mutex);
+ while (_value != expectedValue) {
+ _condition.wait(lk.boost());
}
+ }
- protected:
- OperationContextImpl _opCtx;
- DBDirectClient client;
- };
+private:
+ int _value;
+ mutable mongo::mutex _mutex;
+ mutable boost::condition _condition;
+};
- namespace DocumentSourceClass {
- using mongo::DocumentSource;
- template<size_t ArrayLen>
- set<string> arrayToSet(const char* (&array) [ArrayLen]) {
- set<string> out;
- for (size_t i = 0; i < ArrayLen; i++)
- out.insert(array[i]);
- return out;
- }
+/** Test coalescing a limit into a cursor */
+class LimitCoalesce : public Base {
+public:
+ intrusive_ptr<DocumentSourceLimit> mkLimit(long long limit) {
+ return DocumentSourceLimit::create(ctx(), limit);
+ }
+ void run() {
+ client.insert(ns, BSON("a" << 1));
+ client.insert(ns, BSON("a" << 2));
+ client.insert(ns, BSON("a" << 3));
+ createSource();
+
+ // initial limit becomes limit of cursor
+ ASSERT(source()->coalesce(mkLimit(10)));
+ ASSERT_EQUALS(source()->getLimit(), 10);
+
+ // smaller limit lowers cursor limit
+ ASSERT(source()->coalesce(mkLimit(2)));
+ ASSERT_EQUALS(source()->getLimit(), 2);
+
+ // higher limit doesn't affect cursor limit
+ ASSERT(source()->coalesce(mkLimit(3)));
+ ASSERT_EQUALS(source()->getLimit(), 2);
+
+ // The cursor allows exactly 2 documents through
+ ASSERT(bool(source()->getNext()));
+ ASSERT(bool(source()->getNext()));
+ ASSERT(!source()->getNext());
+ }
+};
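+// Behavior exercised above: coalescing keeps the smallest $limit seen, so the
+// cursor's effective limit is min(current limit, proposed limit).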
- class Deps {
- public:
- void run() {
- {
- const char* array[] = {"a", "b"}; // basic
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "b" << 1 << "_id" << 0));
- }
- {
- const char* array[] = {"a", "ab"}; // prefixed but not subfield
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "ab" << 1 << "_id" << 0));
- }
- {
- const char* array[] = {"a", "b", "a.b"}; // a.b included by a
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "b" << 1 << "_id" << 0));
- }
- {
- const char* array[] = {"a", "_id"}; // _id now included
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
- }
- {
- const char* array[] = {"a", "_id.a"}; // still include whole _id (SERVER-7502)
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
- }
- {
- const char* array[] = {"a", "_id", "_id.a"}; // handle both _id and subfield
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
- }
- {
- const char* array[] = {"a", "_id", "_id_a"}; // _id prefixed but non-subfield
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("_id_a" << 1 << "a" << 1 << "_id" << 1));
- }
- {
- const char* array[] = {"a"}; // fields ignored with needWholeDocument
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- deps.needWholeDocument = true;
- ASSERT_EQUALS(deps.toProjection(), BSONObj());
- }
- {
- const char* array[] = {"a"}; // needTextScore with needWholeDocument
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- deps.needWholeDocument = true;
- deps.needTextScore = true;
- ASSERT_EQUALS(
- deps.toProjection(),
- BSON(Document::metaFieldTextScore << metaTextScore));
- }
- {
- const char* array[] = {"a"}; // needTextScore without needWholeDocument
- DepsTracker deps;
- deps.fields = arrayToSet(array);
- deps.needTextScore = true;
- ASSERT_EQUALS(deps.toProjection(),
- BSON(Document::metaFieldTextScore << metaTextScore
- << "a" << 1
- << "_id" << 0));
- }
- }
- };
+
+} // namespace DocumentSourceCursor
+
+namespace DocumentSourceLimit {
+
+using mongo::DocumentSourceLimit;
+
+class Base : public DocumentSourceCursor::Base {
+protected:
+ void createLimit(int limit) {
+ BSONObj spec = BSON("$limit" << limit);
+ BSONElement specElement = spec.firstElement();
+ _limit = DocumentSourceLimit::createFromBson(specElement, ctx());
+ }
+ DocumentSource* limit() {
+ return _limit.get();
}
- namespace DocumentSourceCursor {
+private:
+ intrusive_ptr<DocumentSource> _limit;
+};
+
+/** Exhausting a DocumentSourceLimit disposes of the limit's source. */
+class DisposeSource : public Base {
+public:
+ void run() {
+ client.insert(ns, BSON("a" << 1));
+ client.insert(ns, BSON("a" << 2));
+ createSource();
+ // The DocumentSourceCursor doesn't hold a read lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ createLimit(1);
+ limit()->setSource(source());
+ // The limit's result is as expected.
+ boost::optional<Document> next = limit()->getNext();
+ ASSERT(bool(next));
+ ASSERT_EQUALS(Value(1), next->getField("a"));
+ // The limit is exhausted.
+ ASSERT(!limit()->getNext());
+ // The limit disposes the source, releasing the read lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ }
+};
+
+/** Exhausting a DocumentSourceLimit disposes of the pipeline's DocumentSourceCursor. */
+class DisposeSourceCascade : public Base {
+public:
+ void run() {
+ client.insert(ns, BSON("a" << 1));
+ client.insert(ns, BSON("a" << 1));
+ createSource();
+
+ // Create a DocumentSourceMatch.
+ BSONObj spec = BSON("$match" << BSON("a" << 1));
+ BSONElement specElement = spec.firstElement();
+ intrusive_ptr<DocumentSource> match =
+ DocumentSourceMatch::createFromBson(specElement, ctx());
+ match->setSource(source());
+
+ createLimit(1);
+ limit()->setSource(match.get());
+ // The limit is not yet exhausted.
+ boost::optional<Document> next = limit()->getNext();
+ ASSERT(bool(next));
+ ASSERT_EQUALS(Value(1), next->getField("a"));
+ // The limit is exhausted.
+ ASSERT(!limit()->getNext());
+ // The limit disposes the match, which disposes the source and releases the read
+ // lock.
+ ASSERT(!_opCtx.lockState()->isReadLocked());
+ }
+};
+
+/** A limit does not introduce any dependencies. */
+class Dependencies : public Base {
+public:
+ void run() {
+ createLimit(1);
+ DepsTracker dependencies;
+ ASSERT_EQUALS(DocumentSource::SEE_NEXT, limit()->getDependencies(&dependencies));
+ ASSERT_EQUALS(0U, dependencies.fields.size());
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(false, dependencies.needTextScore);
+ }
+};
- using mongo::DocumentSourceCursor;
+} // namespace DocumentSourceLimit
- class Base : public CollectionBase {
- public:
- Base() : _ctx(new ExpressionContext(&_opCtx, NamespaceString(ns))) {
- _ctx->tempDir = storageGlobalParams.dbpath + "/_tmp";
- }
+namespace DocumentSourceGroup {
- protected:
- void createSource() {
- // clean up first if this was called before
- _source.reset();
- _exec.reset();
-
- Client::WriteContext ctx(&_opCtx, ns);
- CanonicalQuery* cq;
- uassertStatusOK(CanonicalQuery::canonicalize(ns, /*query=*/BSONObj(), &cq));
- PlanExecutor* execBare;
- uassertStatusOK(getExecutor(&_opCtx,
- ctx.getCollection(),
- cq,
- PlanExecutor::YIELD_MANUAL,
- &execBare));
-
- _exec.reset(execBare);
- _exec->saveState();
- _exec->registerExec();
-
- _source = DocumentSourceCursor::create(ns, _exec, _ctx);
- }
- intrusive_ptr<ExpressionContext> ctx() { return _ctx; }
- DocumentSourceCursor* source() { return _source.get(); }
-
- private:
- // It is important that these are ordered to ensure correct destruction order.
- boost::shared_ptr<PlanExecutor> _exec;
- intrusive_ptr<ExpressionContext> _ctx;
- intrusive_ptr<DocumentSourceCursor> _source;
- };
-
- /** Create a DocumentSourceCursor. */
- class Empty : public Base {
- public:
- void run() {
- createSource();
- // The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- // The collection is empty, so the source produces no results.
- ASSERT( !source()->getNext() );
- // Exhausting the source releases the read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- }
- };
-
- /** Iterate a DocumentSourceCursor. */
- class Iterate : public Base {
- public:
- void run() {
- client.insert( ns, BSON( "a" << 1 ) );
- createSource();
- // The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- // The cursor will produce the expected result.
- boost::optional<Document> next = source()->getNext();
- ASSERT(bool(next));
- ASSERT_EQUALS(Value(1), next->getField("a"));
- // There are no more results.
- ASSERT( !source()->getNext() );
- // Exhausting the source releases the read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- }
- };
-
- /** Dispose of a DocumentSourceCursor. */
- class Dispose : public Base {
- public:
- void run() {
- createSource();
- // The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- source()->dispose();
- // Releasing the cursor releases the read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- // The source is marked as exhausted.
- ASSERT( !source()->getNext() );
- }
- };
-
- /** Iterate a DocumentSourceCursor and then dispose of it. */
- class IterateDispose : public Base {
- public:
- void run() {
- client.insert( ns, BSON( "a" << 1 ) );
- client.insert( ns, BSON( "a" << 2 ) );
- client.insert( ns, BSON( "a" << 3 ) );
- createSource();
- // The result is as expected.
- boost::optional<Document> next = source()->getNext();
- ASSERT(bool(next));
- ASSERT_EQUALS(Value(1), next->getField("a"));
- // The next result is as expected.
- next = source()->getNext();
- ASSERT(bool(next));
- ASSERT_EQUALS(Value(2), next->getField("a"));
- // The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- source()->dispose();
- // Disposing of the source releases the lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- // The source cannot be advanced further.
- ASSERT( !source()->getNext() );
- }
- };
-
- /** Set a value or await an expected value. */
- class PendingValue {
- public:
- PendingValue( int initialValue ) :
- _value( initialValue ),
- _mutex( "DocumentSourceTests::PendingValue::_mutex" ) {
- }
- void set( int newValue ) {
- scoped_lock lk( _mutex );
- _value = newValue;
- _condition.notify_all();
- }
- void await( int expectedValue ) const {
- scoped_lock lk( _mutex );
- while( _value != expectedValue ) {
- _condition.wait( lk.boost() );
- }
- }
- private:
- int _value;
- mutable mongo::mutex _mutex;
- mutable boost::condition _condition;
- };
-
-
- /** Test coalescing a limit into a cursor */
- class LimitCoalesce : public Base {
- public:
- intrusive_ptr<DocumentSourceLimit> mkLimit(long long limit) {
- return DocumentSourceLimit::create(ctx(), limit);
- }
- void run() {
- client.insert( ns, BSON( "a" << 1 ) );
- client.insert( ns, BSON( "a" << 2 ) );
- client.insert( ns, BSON( "a" << 3 ) );
- createSource();
-
- // initial limit becomes limit of cursor
- ASSERT(source()->coalesce(mkLimit(10)));
- ASSERT_EQUALS(source()->getLimit(), 10);
-
- // smaller limit lowers cursor limit
- ASSERT(source()->coalesce(mkLimit(2)));
- ASSERT_EQUALS(source()->getLimit(), 2);
-
- // higher limit doesn't affect cursor limit
- ASSERT(source()->coalesce(mkLimit(3)));
- ASSERT_EQUALS(source()->getLimit(), 2);
-
- // The cursor allows exactly 2 documents through
- ASSERT(bool(source()->getNext()));
- ASSERT(bool(source()->getNext()));
- ASSERT(!source()->getNext());
- }
- };
+using mongo::DocumentSourceGroup;
+class Base : public DocumentSourceCursor::Base {
+protected:
+ void createGroup(const BSONObj& spec, bool inShard = false) {
+ BSONObj namedSpec = BSON("$group" << spec);
+ BSONElement specElement = namedSpec.firstElement();
- } // namespace DocumentSourceCursor
+ intrusive_ptr<ExpressionContext> expressionContext =
+ new ExpressionContext(&_opCtx, NamespaceString(ns));
+ expressionContext->inShard = inShard;
+ expressionContext->tempDir = storageGlobalParams.dbpath + "/_tmp";
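+ // A fresh ExpressionContext is built, rather than reusing ctx(), so that
+ // 'inShard' can be toggled per test.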
- namespace DocumentSourceLimit {
+ _group = DocumentSourceGroup::createFromBson(specElement, expressionContext);
+ assertRoundTrips(_group);
+ _group->setSource(source());
+ }
+ DocumentSource* group() {
+ return _group.get();
+ }
+ /** Assert that iterator state accessors consistently report the source is exhausted. */
+ void assertExhausted(const intrusive_ptr<DocumentSource>& source) const {
+ // It should be safe to check doneness multiple times
+ ASSERT(!source->getNext());
+ ASSERT(!source->getNext());
+ ASSERT(!source->getNext());
+ }
- using mongo::DocumentSourceLimit;
+private:
+ /** Check that the group's spec round trips. */
+ void assertRoundTrips(const intrusive_ptr<DocumentSource>& group) {
+ // We don't check against the spec that generated 'group' originally, because
+ // $const operators may be introduced in the first serialization.
+ BSONObj spec = toBson(group);
+ BSONElement specElement = spec.firstElement();
+ intrusive_ptr<DocumentSource> generated =
+ DocumentSourceGroup::createFromBson(specElement, ctx());
+ ASSERT_EQUALS(spec, toBson(generated));
+ }
+ intrusive_ptr<DocumentSource> _group;
+};
+
+class ParseErrorBase : public Base {
+public:
+ virtual ~ParseErrorBase() {}
+ void run() {
+ ASSERT_THROWS(createGroup(spec()), UserException);
+ }
- class Base : public DocumentSourceCursor::Base {
- protected:
- void createLimit( int limit ) {
- BSONObj spec = BSON( "$limit" << limit );
- BSONElement specElement = spec.firstElement();
- _limit = DocumentSourceLimit::createFromBson( specElement, ctx() );
- }
- DocumentSource* limit() { return _limit.get(); }
- private:
- intrusive_ptr<DocumentSource> _limit;
- };
-
- /** Exhausting a DocumentSourceLimit disposes of the limit's source. */
- class DisposeSource : public Base {
- public:
- void run() {
- client.insert( ns, BSON( "a" << 1 ) );
- client.insert( ns, BSON( "a" << 2 ) );
- createSource();
- // The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- createLimit( 1 );
- limit()->setSource( source() );
- // The limit's result is as expected.
- boost::optional<Document> next = limit()->getNext();
- ASSERT(bool(next));
- ASSERT_EQUALS(Value(1), next->getField("a"));
- // The limit is exhausted.
- ASSERT( !limit()->getNext() );
- // The limit disposes the source, releasing the read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- }
- };
-
- /** Exhausting a DocumentSourceLimit disposes of the pipeline's DocumentSourceCursor. */
- class DisposeSourceCascade : public Base {
- public:
- void run() {
- client.insert( ns, BSON( "a" << 1 ) );
- client.insert( ns, BSON( "a" << 1 ) );
- createSource();
-
- // Create a DocumentSourceMatch.
- BSONObj spec = BSON( "$match" << BSON( "a" << 1 ) );
- BSONElement specElement = spec.firstElement();
- intrusive_ptr<DocumentSource> match =
- DocumentSourceMatch::createFromBson( specElement, ctx() );
- match->setSource( source() );
-
- createLimit( 1 );
- limit()->setSource( match.get() );
- // The limit is not yet exhausted.
- boost::optional<Document> next = limit()->getNext();
- ASSERT(bool(next));
- ASSERT_EQUALS(Value(1), next->getField("a"));
- // The limit is exhausted.
- ASSERT( !limit()->getNext() );
- // The limit disposes the match, which disposes the source and releases the read
- // lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
- }
- };
-
- /** A limit does not introduce any dependencies. */
- class Dependencies : public Base {
- public:
- void run() {
- createLimit( 1 );
- DepsTracker dependencies;
- ASSERT_EQUALS( DocumentSource::SEE_NEXT, limit()->getDependencies(&dependencies) );
- ASSERT_EQUALS( 0U, dependencies.fields.size() );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( false, dependencies.needTextScore );
- }
- };
+protected:
+ virtual BSONObj spec() = 0;
+};
+
+class ExpressionBase : public Base {
+public:
+ virtual ~ExpressionBase() {}
+ void run() {
+ // Insert a single document for $group to iterate over.
+ client.insert(ns, doc());
+ createSource();
+ createGroup(spec());
+ // A group result is available.
+ boost::optional<Document> next = group()->getNext();
+ ASSERT(bool(next));
+ // The constant _id value from the $group spec is passed through.
+ ASSERT_EQUALS(expected(), next->toBson());
+ }
- } // namespace DocumentSourceLimit
+protected:
+ virtual BSONObj doc() = 0;
+ virtual BSONObj spec() = 0;
+ virtual BSONObj expected() = 0;
+};
- namespace DocumentSourceGroup {
+class IdConstantBase : public ExpressionBase {
+ virtual BSONObj doc() {
+ return BSONObj();
+ }
+ virtual BSONObj expected() {
+ // Since spec() specifies a constant _id, its value will be passed through.
+ return spec();
+ }
+};
+
+/** $group spec is not an object. */
+class NonObject : public Base {
+public:
+ void run() {
+ BSONObj spec = BSON("$group"
+ << "foo");
+ BSONElement specElement = spec.firstElement();
+ ASSERT_THROWS(DocumentSourceGroup::createFromBson(specElement, ctx()), UserException);
+ }
+};
- using mongo::DocumentSourceGroup;
+/** $group spec is an empty object. */
+class EmptySpec : public ParseErrorBase {
+ BSONObj spec() {
+ return BSONObj();
+ }
+};
- class Base : public DocumentSourceCursor::Base {
- protected:
- void createGroup( const BSONObj &spec, bool inShard = false ) {
- BSONObj namedSpec = BSON( "$group" << spec );
- BSONElement specElement = namedSpec.firstElement();
+/** $group _id is an empty object. */
+class IdEmptyObject : public IdConstantBase {
+ BSONObj spec() {
+ return BSON("_id" << BSONObj());
+ }
+};
- intrusive_ptr<ExpressionContext> expressionContext =
- new ExpressionContext(&_opCtx, NamespaceString(ns));
- expressionContext->inShard = inShard;
- expressionContext->tempDir = storageGlobalParams.dbpath + "/_tmp";
+/** $group _id is computed from an object expression. */
+class IdObjectExpression : public ExpressionBase {
+ BSONObj doc() {
+ return BSON("a" << 6);
+ }
+ BSONObj spec() {
+ return BSON("_id" << BSON("z"
+ << "$a"));
+ }
+ BSONObj expected() {
+ return BSON("_id" << BSON("z" << 6));
+ }
+};
- _group = DocumentSourceGroup::createFromBson( specElement, expressionContext );
- assertRoundTrips( _group );
- _group->setSource( source() );
- }
- DocumentSource* group() { return _group.get(); }
- /** Assert that iterator state accessors consistently report the source is exhausted. */
- void assertExhausted( const intrusive_ptr<DocumentSource> &source ) const {
- // It should be safe to check doneness multiple times
- ASSERT( !source->getNext() );
- ASSERT( !source->getNext() );
- ASSERT( !source->getNext() );
- }
- private:
- /** Check that the group's spec round trips. */
- void assertRoundTrips( const intrusive_ptr<DocumentSource>& group ) {
- // We don't check against the spec that generated 'group' originally, because
- // $const operators may be introduced in the first serialization.
- BSONObj spec = toBson(group);
- BSONElement specElement = spec.firstElement();
- intrusive_ptr<DocumentSource> generated =
- DocumentSourceGroup::createFromBson( specElement, ctx() );
- ASSERT_EQUALS( spec, toBson( generated ) );
- }
- intrusive_ptr<DocumentSource> _group;
- };
+/** $group _id is specified as an invalid object expression. */
+class IdInvalidObjectExpression : public ParseErrorBase {
+ BSONObj spec() {
+ return BSON("_id" << BSON("$add" << 1 << "$and" << 1));
+ }
+};
- class ParseErrorBase : public Base {
- public:
- virtual ~ParseErrorBase() {
- }
- void run() {
- ASSERT_THROWS( createGroup( spec() ), UserException );
- }
- protected:
- virtual BSONObj spec() = 0;
- };
+/** $group with two _id specs. */
+class TwoIdSpecs : public ParseErrorBase {
+ BSONObj spec() {
+ return BSON("_id" << 1 << "_id" << 2);
+ }
+};
- class ExpressionBase : public Base {
- public:
- virtual ~ExpressionBase() {
- }
- void run() {
- // Insert a single document for $group to iterate over.
- client.insert( ns, doc() );
- createSource();
- createGroup( spec() );
- // A group result is available.
- boost::optional<Document> next = group()->getNext();
- ASSERT(bool(next));
- // The constant _id value from the $group spec is passed through.
- ASSERT_EQUALS(expected(), next->toBson());
- }
- protected:
- virtual BSONObj doc() = 0;
- virtual BSONObj spec() = 0;
- virtual BSONObj expected() = 0;
- };
-
- class IdConstantBase : public ExpressionBase {
- virtual BSONObj doc() { return BSONObj(); }
- virtual BSONObj expected() {
- // Since spec() specifies a constant _id, its value will be passed through.
- return spec();
- }
- };
-
- /** $group spec is not an object. */
- class NonObject : public Base {
- public:
- void run() {
- BSONObj spec = BSON( "$group" << "foo" );
- BSONElement specElement = spec.firstElement();
- ASSERT_THROWS( DocumentSourceGroup::createFromBson( specElement, ctx() ),
- UserException );
- }
- };
-
- /** $group spec is an empty object. */
- class EmptySpec : public ParseErrorBase {
- BSONObj spec() { return BSONObj(); }
- };
-
- /** $group _id is an empty object. */
- class IdEmptyObject : public IdConstantBase {
- BSONObj spec() { return BSON( "_id" << BSONObj() ); }
- };
-
- /** $group _id is computed from an object expression. */
- class IdObjectExpression : public ExpressionBase {
- BSONObj doc() { return BSON( "a" << 6 ); }
- BSONObj spec() { return BSON( "_id" << BSON( "z" << "$a" ) ); }
- BSONObj expected() { return BSON( "_id" << BSON( "z" << 6 ) ); }
- };
-
- /** $group _id is specified as an invalid object expression. */
- class IdInvalidObjectExpression : public ParseErrorBase {
- BSONObj spec() { return BSON( "_id" << BSON( "$add" << 1 << "$and" << 1 ) ); }
- };
-
- /** $group with two _id specs. */
- class TwoIdSpecs : public ParseErrorBase {
- BSONObj spec() { return BSON( "_id" << 1 << "_id" << 2 ); }
- };
-
- /** $group _id is the empty string. */
- class IdEmptyString : public IdConstantBase {
- BSONObj spec() { return BSON( "_id" << "" ); }
- };
-
- /** $group _id is a string constant. */
- class IdStringConstant : public IdConstantBase {
- BSONObj spec() { return BSON( "_id" << "abc" ); }
- };
-
- /** $group _id is a field path expression. */
- class IdFieldPath : public ExpressionBase {
- BSONObj doc() { return BSON( "a" << 5 ); }
- BSONObj spec() { return BSON( "_id" << "$a" ); }
- BSONObj expected() { return BSON( "_id" << 5 ); }
- };
-
- /** $group with _id set to an invalid field path. */
- class IdInvalidFieldPath : public ParseErrorBase {
- BSONObj spec() { return BSON( "_id" << "$a.." ); }
- };
-
- /** $group _id is a numeric constant. */
- class IdNumericConstant : public IdConstantBase {
- BSONObj spec() { return BSON( "_id" << 2 ); }
- };
-
- /** $group _id is an array constant. */
- class IdArrayConstant : public IdConstantBase {
- BSONObj spec() { return BSON( "_id" << BSON_ARRAY( 1 << 2 ) ); }
- };
-
- /** $group _id is a regular expression (not supported). */
- class IdRegularExpression : public IdConstantBase {
- BSONObj spec() { return fromjson( "{_id:/a/}" ); }
- };
-
- /** The name of an aggregate field is specified with a $ prefix. */
- class DollarAggregateFieldName : public ParseErrorBase {
- BSONObj spec() { return BSON( "_id" << 1 << "$foo" << BSON( "$sum" << 1 ) ); }
- };
-
- /** An aggregate field spec that is not an object. */
- class NonObjectAggregateSpec : public ParseErrorBase {
- BSONObj spec() { return BSON( "_id" << 1 << "a" << 1 ); }
- };
-
- /** An aggregate field spec that is not an object. */
- class EmptyObjectAggregateSpec : public ParseErrorBase {
- BSONObj spec() { return BSON( "_id" << 1 << "a" << BSONObj() ); }
- };
-
- /** An aggregate field spec with an invalid accumulator operator. */
- class BadAccumulator : public ParseErrorBase {
- BSONObj spec() { return BSON( "_id" << 1 << "a" << BSON( "$bad" << 1 ) ); }
- };
-
- /** An aggregate field spec with an array argument. */
- class SumArray : public ParseErrorBase {
- BSONObj spec() { return BSON( "_id" << 1 << "a" << BSON( "$sum" << BSONArray() ) ); }
- };
-
- /** Multiple accumulator operators for a field. */
- class MultipleAccumulatorsForAField : public ParseErrorBase {
- BSONObj spec() {
- return BSON( "_id" << 1 << "a" << BSON( "$sum" << 1 << "$push" << 1 ) );
- }
- };
-
- /** Aggregation using duplicate field names is allowed currently. */
- class DuplicateAggregateFieldNames : public ExpressionBase {
- BSONObj doc() { return BSONObj(); }
- BSONObj spec() {
- return BSON( "_id" << 0 << "z" << BSON( "$sum" << 1 )
- << "z" << BSON( "$push" << 1 ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "z" << 1 << "z" << BSON_ARRAY( 1 ) ); }
- };
-
- /** Aggregate the value of an object expression. */
- class AggregateObjectExpression : public ExpressionBase {
- BSONObj doc() { return BSON( "a" << 6 ); }
- BSONObj spec() {
- return BSON( "_id" << 0 << "z" << BSON( "$first" << BSON( "x" << "$a" ) ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "z" << BSON( "x" << 6 ) ); }
- };
-
- /** Aggregate the value of an operator expression. */
- class AggregateOperatorExpression : public ExpressionBase {
- BSONObj doc() { return BSON( "a" << 6 ); }
- BSONObj spec() {
- return BSON( "_id" << 0 << "z" << BSON( "$first" << "$a" ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "z" << 6 ); }
- };
+/** $group _id is the empty string. */
+class IdEmptyString : public IdConstantBase {
+ BSONObj spec() {
+ return BSON("_id"
+ << "");
+ }
+};
- struct ValueCmp {
- bool operator()(const Value& a, const Value& b) const {
- return Value::compare( a, b ) < 0;
- }
- };
- typedef map<Value,Document,ValueCmp> IdMap;
+/** $group _id is a string constant. */
+class IdStringConstant : public IdConstantBase {
+ BSONObj spec() {
+ return BSON("_id"
+ << "abc");
+ }
+};
- class CheckResultsBase : public Base {
- public:
- virtual ~CheckResultsBase() {
- }
- void run() {
- runSharded( false );
- client.dropCollection( ns );
- runSharded( true );
- }
- void runSharded( bool sharded ) {
- populateData();
- createSource();
- createGroup( groupSpec() );
-
- intrusive_ptr<DocumentSource> sink = group();
- if ( sharded ) {
- sink = createMerger();
- // Serialize and re-parse the shard stage.
- createGroup( toBson( group() )[ "$group" ].Obj(), true );
- sink->setSource( group() );
- }
-
- checkResultSet( sink );
- }
- protected:
- virtual void populateData() {}
- virtual BSONObj groupSpec() { return BSON( "_id" << 0 ); }
- /** Expected results. Must be sorted by _id to ensure consistent ordering. */
- virtual BSONObj expectedResultSet() {
- BSONObj wrappedResult =
- // fromjson cannot parse an array, so place the array within an object.
- fromjson( string( "{'':" ) + expectedResultSetString() + "}" );
- return wrappedResult[ "" ].embeddedObject().getOwned();
- }
- /** Expected results. Must be sorted by _id to ensure consistent ordering. */
- virtual string expectedResultSetString() { return "[]"; }
- intrusive_ptr<DocumentSource> createMerger() {
- // Set up a group merger to simulate merging results in the router. In this
- // case only one shard is in use.
- SplittableDocumentSource *splittable =
- dynamic_cast<SplittableDocumentSource*>( group() );
- ASSERT( splittable );
- intrusive_ptr<DocumentSource> routerSource = splittable->getMergeSource();
- ASSERT_NOT_EQUALS( group(), routerSource.get() );
- return routerSource;
- }
- void checkResultSet( const intrusive_ptr<DocumentSource> &sink ) {
- // Load the results from the DocumentSourceGroup and sort them by _id.
- IdMap resultSet;
- while (boost::optional<Document> current = sink->getNext()) {
- // Save the current result.
- Value id = current->getField( "_id" );
- resultSet[ id ] = *current;
- }
- // Verify the DocumentSourceGroup is exhausted.
- assertExhausted( sink );
-
- // Convert results to BSON once they all have been retrieved (to detect any errors
- // resulting from incorrectly shared sub objects).
- BSONArrayBuilder bsonResultSet;
- for( IdMap::const_iterator i = resultSet.begin(); i != resultSet.end(); ++i ) {
- bsonResultSet << i->second;
- }
- // Check the result set.
- ASSERT_EQUALS( expectedResultSet(), bsonResultSet.arr() );
- }
- };
+/** $group _id is a field path expression. */
+class IdFieldPath : public ExpressionBase {
+ BSONObj doc() {
+ return BSON("a" << 5);
+ }
+ BSONObj spec() {
+ return BSON("_id"
+ << "$a");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 5);
+ }
+};
- /** An empty collection generates no results. */
- class EmptyCollection : public CheckResultsBase {
- };
+/** $group with _id set to an invalid field path. */
+class IdInvalidFieldPath : public ParseErrorBase {
+ BSONObj spec() {
+ return BSON("_id"
+ << "$a..");
+ }
+};
- /** A $group performed on a single document. */
- class SingleDocument : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "a" << 1 ) );
- }
- virtual BSONObj groupSpec() {
- return BSON( "_id" << 0 << "a" << BSON( "$sum" << "$a" ) );
- }
- virtual string expectedResultSetString() { return "[{_id:0,a:1}]"; }
- };
-
- /** A $group performed on two values for a single key. */
- class TwoValuesSingleKey : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "a" << 1 ) );
- client.insert( ns, BSON( "a" << 2 ) );
- }
- virtual BSONObj groupSpec() {
- return BSON( "_id" << 0 << "a" << BSON( "$push" << "$a" ) );
- }
- virtual string expectedResultSetString() { return "[{_id:0,a:[1,2]}]"; }
- };
-
- /** A $group performed on two values with one key each. */
- class TwoValuesTwoKeys : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 1 ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << 2 ) );
- }
- virtual BSONObj groupSpec() {
- return BSON( "_id" << "$_id" << "a" << BSON( "$push" << "$a" ) );
- }
- virtual string expectedResultSetString() { return "[{_id:0,a:[1]},{_id:1,a:[2]}]"; }
- };
-
- /** A $group performed on two values with two keys each. */
- class FourValuesTwoKeys : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "id" << 0 << "a" << 1 ) );
- client.insert( ns, BSON( "id" << 1 << "a" << 2 ) );
- client.insert( ns, BSON( "id" << 0 << "a" << 3 ) );
- client.insert( ns, BSON( "id" << 1 << "a" << 4 ) );
- }
- virtual BSONObj groupSpec() {
- return BSON( "_id" << "$id" << "a" << BSON( "$push" << "$a" ) );
- }
- virtual string expectedResultSetString() { return "[{_id:0,a:[1,3]},{_id:1,a:[2,4]}]"; }
- };
-
- /** A $group performed on two values with two keys each and two accumulator operations. */
- class FourValuesTwoKeysTwoAccumulators : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "id" << 0 << "a" << 1 ) );
- client.insert( ns, BSON( "id" << 1 << "a" << 2 ) );
- client.insert( ns, BSON( "id" << 0 << "a" << 3 ) );
- client.insert( ns, BSON( "id" << 1 << "a" << 4 ) );
- }
- virtual BSONObj groupSpec() {
- return BSON( "_id" << "$id"
- << "list" << BSON( "$push" << "$a" )
- << "sum" << BSON( "$sum"
- << BSON( "$divide" << BSON_ARRAY( "$a" << 2 ) ) ) );
- }
- virtual string expectedResultSetString() {
- return "[{_id:0,list:[1,3],sum:2},{_id:1,list:[2,4],sum:3}]";
- }
- };
+/** $group _id is a numeric constant. */
+class IdNumericConstant : public IdConstantBase {
+ BSONObj spec() {
+ return BSON("_id" << 2);
+ }
+};
- /** Null and undefined _id values are grouped together. */
- class GroupNullUndefinedIds : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "a" << BSONNULL << "b" << 100 ) );
- client.insert( ns, BSON( "b" << 10 ) );
- }
- virtual BSONObj groupSpec() {
- return BSON( "_id" << "$a" << "sum" << BSON( "$sum" << "$b" ) );
- }
- virtual string expectedResultSetString() { return "[{_id:null,sum:110}]"; }
- };
-
- /** A complex _id expression. */
- class ComplexId : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "a" << "de"
- << "b" << "ad"
- << "c" << "beef"
- << "d" << ""
- ));
- client.insert( ns, BSON( "a" << "d"
- << "b" << "eadbe"
- << "c" << ""
- << "d" << "ef"
- ));
- }
- virtual BSONObj groupSpec() {
- return BSON( "_id" << BSON( "$concat"
- << BSON_ARRAY( "$a" << "$b" << "$c" << "$d" ) ) );
- }
- virtual string expectedResultSetString() { return "[{_id:'deadbeef'}]"; }
- };
+/** $group _id is an array constant. */
+class IdArrayConstant : public IdConstantBase {
+ BSONObj spec() {
+ return BSON("_id" << BSON_ARRAY(1 << 2));
+ }
+};
- /** An undefined accumulator value is dropped. */
- class UndefinedAccumulatorValue : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSONObj() );
- }
- virtual BSONObj groupSpec() {
- return BSON( "_id" << 0 << "first" << BSON( "$first" << "$missing" ) );
- }
- virtual string expectedResultSetString() { return "[{_id:0, first:null}]"; }
- };
-
- /** Simulate merging sharded results in the router. */
- class RouterMerger : public CheckResultsBase {
- public:
- void run() {
- BSONObj sourceData =
- fromjson( "{'':[{_id:0,list:[1,2]},{_id:1,list:[3,4]}" // from shard 1
- ",{_id:0,list:[10,20]},{_id:1,list:[30,40]}]}" // from shard 2
- );
- BSONElement sourceDataElement = sourceData.firstElement();
- // Create a source with synthetic data.
- intrusive_ptr<DocumentSourceBsonArray> source =
- DocumentSourceBsonArray::create( sourceDataElement.Obj(), ctx() );
- // Create a group source.
- createGroup( BSON( "_id" << "$x" << "list" << BSON( "$push" << "$y" ) ) );
- // Create a merger version of the source.
- intrusive_ptr<DocumentSource> group = createMerger();
- // Attach the merger to the synthetic shard results.
- group->setSource( source.get() );
- // Check the merger's output.
- checkResultSet( group );
- }
- private:
- string expectedResultSetString() {
- return "[{_id:0,list:[1,2,10,20]},{_id:1,list:[3,4,30,40]}]";
- }
- };
-
- /** Dependent field paths. */
- class Dependencies : public Base {
- public:
- void run() {
- createGroup( fromjson( "{_id:'$x',a:{$sum:'$y.z'},b:{$avg:{$add:['$u','$v']}}}" ) );
- DepsTracker dependencies;
- ASSERT_EQUALS( DocumentSource::EXHAUSTIVE_ALL,
- group()->getDependencies( &dependencies ) );
- ASSERT_EQUALS( 4U, dependencies.fields.size() );
- // Dependency from _id expression.
- ASSERT_EQUALS( 1U, dependencies.fields.count( "x" ) );
- // Dependencies from accumulator expressions.
- ASSERT_EQUALS( 1U, dependencies.fields.count( "y.z" ) );
- ASSERT_EQUALS( 1U, dependencies.fields.count( "u" ) );
- ASSERT_EQUALS( 1U, dependencies.fields.count( "v" ) );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( false, dependencies.needTextScore );
- }
- };
-
- /**
- * A string constant (not a field path) as an _id expression and passed to an accumulator.
- * SERVER-6766
- */
- class StringConstantIdAndAccumulatorExpressions : public CheckResultsBase {
- void populateData() { client.insert( ns, BSONObj() ); }
- BSONObj groupSpec() {
- return fromjson( "{_id:{$const:'$_id...'},a:{$push:{$const:'$a...'}}}" );
- }
- string expectedResultSetString() { return "[{_id:'$_id...',a:['$a...']}]"; }
- };
-
- /** An array constant passed to an accumulator. */
- class ArrayConstantAccumulatorExpression : public CheckResultsBase {
- public:
- void run() {
- // A parse exception is thrown when a raw array is provided to an accumulator.
- ASSERT_THROWS( createGroup( fromjson( "{_id:1,a:{$push:[4,5,6]}}" ) ),
- UserException );
- // Run standard base tests.
- CheckResultsBase::run();
- }
- void populateData() { client.insert( ns, BSONObj() ); }
- BSONObj groupSpec() {
- // An array can be specified using $const.
- return fromjson( "{_id:[1,2,3],a:{$push:{$const:[4,5,6]}}}" );
- }
- string expectedResultSetString() { return "[{_id:[1,2,3],a:[[4,5,6]]}]"; }
- };
+/** $group _id is a regular expression (not supported). */
+class IdRegularExpression : public IdConstantBase {
+ BSONObj spec() {
+ return fromjson("{_id:/a/}");
+ }
+};
- } // namespace DocumentSourceGroup
+/** The name of an aggregate field is specified with a $ prefix. */
+class DollarAggregateFieldName : public ParseErrorBase {
+ BSONObj spec() {
+ return BSON("_id" << 1 << "$foo" << BSON("$sum" << 1));
+ }
+};
- namespace DocumentSourceProject {
+/** An aggregate field spec that is not an object. */
+class NonObjectAggregateSpec : public ParseErrorBase {
+ BSONObj spec() {
+ return BSON("_id" << 1 << "a" << 1);
+ }
+};
- using mongo::DocumentSourceProject;
+/** An aggregate field spec that is not an object. */
+class EmptyObjectAggregateSpec : public ParseErrorBase {
+ BSONObj spec() {
+ return BSON("_id" << 1 << "a" << BSONObj());
+ }
+};
- class Base : public DocumentSourceCursor::Base {
- protected:
- void createProject( const BSONObj& projection = BSON( "a" << true ) ) {
- BSONObj spec = BSON( "$project" << projection );
- BSONElement specElement = spec.firstElement();
- _project = DocumentSourceProject::createFromBson( specElement, ctx() );
- checkBsonRepresentation( spec );
- _project->setSource( source() );
- }
- DocumentSource* project() { return _project.get(); }
- /** Assert that iterator state accessors consistently report the source is exhausted. */
- void assertExhausted() const {
- ASSERT( !_project->getNext() );
- ASSERT( !_project->getNext() );
- ASSERT( !_project->getNext() );
- }
- /**
- * Check that the BSON representation generated by the source matches the BSON it was
- * created with.
- */
- void checkBsonRepresentation( const BSONObj& spec ) {
- vector<Value> arr;
- _project->serializeToArray(arr);
- BSONObj generatedSpec = arr[0].getDocument().toBson();
- ASSERT_EQUALS( spec, generatedSpec );
- }
- private:
- intrusive_ptr<DocumentSource> _project;
- };
-
- /** The 'a' and 'c.d' fields are included, but the 'b' field is not. */
- class Inclusion : public Base {
- public:
- void run() {
- client.insert( ns, fromjson( "{_id:0,a:1,b:1,c:{d:1}}" ) );
- createSource();
- createProject( BSON( "a" << true << "c" << BSON( "d" << true ) ) );
- // The first result exists and is as expected.
- boost::optional<Document> next = project()->getNext();
- ASSERT(bool(next));
- ASSERT_EQUALS( 1, next->getField( "a" ).getInt() );
- ASSERT( next->getField( "b" ).missing() );
- // The _id field is included by default in the root document.
- ASSERT_EQUALS(0, next->getField( "_id" ).getInt());
- // The nested c.d inclusion.
- ASSERT_EQUALS(1, (*next)["c"]["d"].getInt());
- }
- };
-
- /** Optimize the projection. */
- class Optimize : public Base {
- public:
- void run() {
- createProject(BSON("a" << BSON("$and" << BSON_ARRAY(BSON("$const" << true)))));
- project()->optimize();
- // Optimizing the DocumentSourceProject optimizes the Expressions that comprise it,
- // in this case replacing an expression depending on constants with a constant.
- checkBsonRepresentation( fromjson( "{$project:{a:{$const:true}}}" ) );
- }
- };
-
- /** Projection spec is not an object. */
- class NonObjectSpec : public Base {
- public:
- void run() {
- BSONObj spec = BSON( "$project" << "foo" );
- BSONElement specElement = spec.firstElement();
- ASSERT_THROWS( DocumentSourceProject::createFromBson( specElement, ctx() ),
- UserException );
- }
- };
+/** An aggregate field spec with an invalid accumulator operator. */
+class BadAccumulator : public ParseErrorBase {
+ BSONObj spec() {
+ return BSON("_id" << 1 << "a" << BSON("$bad" << 1));
+ }
+};
- /** Projection spec is an empty object. */
- class EmptyObjectSpec : public Base {
- public:
- void run() {
- ASSERT_THROWS( createProject( BSONObj() ), UserException );
- }
- };
+/** An aggregate field spec with an array argument. */
+class SumArray : public ParseErrorBase {
+ BSONObj spec() {
+ return BSON("_id" << 1 << "a" << BSON("$sum" << BSONArray()));
+ }
+};
- /** Projection spec contains a top level dollar sign. */
- class TopLevelDollar : public Base {
- public:
- void run() {
- ASSERT_THROWS( createProject( BSON( "$add" << BSONArray() ) ), UserException );
- }
- };
-
- /** Projection spec is invalid. */
- class InvalidSpec : public Base {
- public:
- void run() {
- ASSERT_THROWS( createProject( BSON( "a" << BSON( "$invalidOperator" << 1 ) ) ),
- UserException );
- }
- };
-
- /** Two documents are projected. */
- class TwoDocuments : public Base {
- public:
- void run() {
- client.insert( ns, BSON( "a" << 1 << "b" << 2 ) );
- client.insert( ns, BSON( "a" << 3 << "b" << 4 ) );
- createSource();
- createProject();
- boost::optional<Document> next = project()->getNext();
- ASSERT(bool(next));
- ASSERT_EQUALS( 1, next->getField( "a" ).getInt() );
- ASSERT( next->getField( "b" ).missing() );
-
- next = project()->getNext();
- ASSERT(bool(next));
- ASSERT_EQUALS( 3, next->getField( "a" ).getInt() );
- ASSERT( next->getField( "b" ).missing() );
-
- assertExhausted();
- }
- };
-
- /** List of dependent field paths. */
- class Dependencies : public Base {
- public:
- void run() {
- createProject(fromjson(
- "{a:true,x:'$b',y:{$and:['$c','$d']}, z: {$meta:'textScore'}}"));
- DepsTracker dependencies;
- ASSERT_EQUALS( DocumentSource::EXHAUSTIVE_FIELDS,
- project()->getDependencies( &dependencies ) );
- ASSERT_EQUALS( 5U, dependencies.fields.size() );
- // Implicit _id dependency.
- ASSERT_EQUALS( 1U, dependencies.fields.count( "_id" ) );
- // Inclusion dependency.
- ASSERT_EQUALS( 1U, dependencies.fields.count( "a" ) );
- // Field path expression dependency.
- ASSERT_EQUALS( 1U, dependencies.fields.count( "b" ) );
- // Nested expression dependencies.
- ASSERT_EQUALS( 1U, dependencies.fields.count( "c" ) );
- ASSERT_EQUALS( 1U, dependencies.fields.count( "d" ) );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( true, dependencies.needTextScore );
- }
- };
-
- } // namespace DocumentSourceProject
-
- namespace DocumentSourceSort {
-
- using mongo::DocumentSourceSort;
-
- class Base : public DocumentSourceCursor::Base {
- protected:
- void createSort( const BSONObj& sortKey = BSON( "a" << 1 ) ) {
- BSONObj spec = BSON( "$sort" << sortKey );
- BSONElement specElement = spec.firstElement();
- _sort = DocumentSourceSort::createFromBson( specElement, ctx() );
- checkBsonRepresentation( spec );
- _sort->setSource( source() );
- }
- DocumentSourceSort* sort() { return dynamic_cast<DocumentSourceSort*>(_sort.get()); }
- /** Assert that iterator state accessors consistently report the source is exhausted. */
- void assertExhausted() const {
- ASSERT( !_sort->getNext() );
- ASSERT( !_sort->getNext() );
- ASSERT( !_sort->getNext() );
- }
- private:
- /**
- * Check that the BSON representation generated by the source matches the BSON it was
- * created with.
- */
- void checkBsonRepresentation( const BSONObj& spec ) {
- vector<Value> arr;
- _sort->serializeToArray(arr);
- BSONObj generatedSpec = arr[0].getDocument().toBson();
- ASSERT_EQUALS( spec, generatedSpec );
- }
- intrusive_ptr<DocumentSource> _sort;
- };
-
- class SortWithLimit : public Base {
- public:
- void run() {
- createSort(BSON("a" << 1));
- ASSERT_EQUALS(sort()->getLimit(), -1);
-
- { // pre-limit checks
- vector<Value> arr;
- sort()->serializeToArray(arr);
- ASSERT_EQUALS(arr[0].getDocument().toBson(), BSON("$sort" << BSON("a" << 1)));
-
- ASSERT(sort()->getShardSource() == NULL);
- ASSERT(sort()->getMergeSource() != NULL);
- }
-
- ASSERT_TRUE(sort()->coalesce(mkLimit(10)));
- ASSERT_EQUALS(sort()->getLimit(), 10);
- ASSERT_TRUE(sort()->coalesce(mkLimit(15)));
- ASSERT_EQUALS(sort()->getLimit(), 10); // unchanged
- ASSERT_TRUE(sort()->coalesce(mkLimit(5)));
- ASSERT_EQUALS(sort()->getLimit(), 5); // reduced
-
- vector<Value> arr;
- sort()->serializeToArray(arr);
- ASSERT_EQUALS(Value(arr), DOC_ARRAY(DOC("$sort" << DOC("a" << 1))
- << DOC("$limit" << sort()->getLimit())));
-
- ASSERT(sort()->getShardSource() != NULL);
- ASSERT(sort()->getMergeSource() != NULL);
- }
+/** Multiple accumulator operators for a field. */
+class MultipleAccumulatorsForAField : public ParseErrorBase {
+ BSONObj spec() {
+ return BSON("_id" << 1 << "a" << BSON("$sum" << 1 << "$push" << 1));
+ }
+};
- intrusive_ptr<DocumentSource> mkLimit(int limit) {
- BSONObj obj = BSON("$limit" << limit);
- BSONElement e = obj.firstElement();
- return mongo::DocumentSourceLimit::createFromBson(e, ctx());
- }
- };
-
- class CheckResultsBase : public Base {
- public:
- virtual ~CheckResultsBase() {}
- void run() {
- populateData();
- createSource();
- createSort( sortSpec() );
-
- // Load the results from the DocumentSourceUnwind.
- vector<Document> resultSet;
- while (boost::optional<Document> current = sort()->getNext()) {
- // Get the current result.
- resultSet.push_back(*current);
- }
- // Verify the DocumentSourceUnwind is exhausted.
- assertExhausted();
-
- // Convert results to BSON once they all have been retrieved (to detect any errors
- // resulting from incorrectly shared sub objects).
- BSONArrayBuilder bsonResultSet;
- for( vector<Document>::const_iterator i = resultSet.begin();
- i != resultSet.end(); ++i ) {
- bsonResultSet << *i;
- }
- // Check the result set.
- ASSERT_EQUALS( expectedResultSet(), bsonResultSet.arr() );
- }
- protected:
- virtual void populateData() {}
- virtual BSONObj expectedResultSet() {
- BSONObj wrappedResult =
- // fromjson cannot parse an array, so place the array within an object.
- fromjson( string( "{'':" ) + expectedResultSetString() + "}" );
- return wrappedResult[ "" ].embeddedObject().getOwned();
- }
- virtual string expectedResultSetString() { return "[]"; }
- virtual BSONObj sortSpec() { return BSON( "a" << 1 ); }
- };
+/** Aggregation using duplicate field names is allowed currently. */
+class DuplicateAggregateFieldNames : public ExpressionBase {
+ BSONObj doc() {
+ return BSONObj();
+ }
+ BSONObj spec() {
+ return BSON("_id" << 0 << "z" << BSON("$sum" << 1) << "z" << BSON("$push" << 1));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "z" << 1 << "z" << BSON_ARRAY(1));
+ }
+};
- class InvalidSpecBase : public Base {
- public:
- virtual ~InvalidSpecBase() {
- }
- void run() {
- ASSERT_THROWS( createSort( sortSpec() ), UserException );
- }
- protected:
- virtual BSONObj sortSpec() = 0;
- };
+/** Aggregate the value of an object expression. */
+class AggregateObjectExpression : public ExpressionBase {
+ BSONObj doc() {
+ return BSON("a" << 6);
+ }
+ BSONObj spec() {
+ return BSON("_id" << 0 << "z" << BSON("$first" << BSON("x"
+ << "$a")));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "z" << BSON("x" << 6));
+ }
+};
- class InvalidOperationBase : public Base {
- public:
- virtual ~InvalidOperationBase() {
- }
- void run() {
- populateData();
- createSource();
- createSort( sortSpec() );
- ASSERT_THROWS( exhaust(), UserException );
- }
- protected:
- virtual void populateData() = 0;
- virtual BSONObj sortSpec() { return BSON( "a" << 1 ); }
- private:
- void exhaust() {
- while (sort()->getNext()) {
- // do nothing
- }
- }
- };
+/** Aggregate the value of an operator expression. */
+class AggregateOperatorExpression : public ExpressionBase {
+ BSONObj doc() {
+ return BSON("a" << 6);
+ }
+ BSONObj spec() {
+ return BSON("_id" << 0 << "z" << BSON("$first"
+ << "$a"));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "z" << 6);
+ }
+};
- /** No documents in source. */
- class Empty : public CheckResultsBase {
- };
+struct ValueCmp {
+ bool operator()(const Value& a, const Value& b) const {
+ return Value::compare(a, b) < 0;
+ }
+};
+typedef map<Value, Document, ValueCmp> IdMap;
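+// IdMap orders results by _id via Value::compare, giving the checks below a
+// deterministic iteration order.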
+
+class CheckResultsBase : public Base {
+public:
+ virtual ~CheckResultsBase() {}
+ void run() {
+ runSharded(false);
+ client.dropCollection(ns);
+ runSharded(true);
+ }
+ void runSharded(bool sharded) {
+ populateData();
+ createSource();
+ createGroup(groupSpec());
+
+ intrusive_ptr<DocumentSource> sink = group();
+ if (sharded) {
+ sink = createMerger();
+ // Serialize and re-parse the shard stage.
+ createGroup(toBson(group())["$group"].Obj(), true);
+ sink->setSource(group());
+ }
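+ // After the branch above, 'group()' is the shard-side stage and 'sink' the
+ // router-side merger (or the plain group when unsharded).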
- /** Sort a single document. */
- class SingleValue : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 1 ) );
- }
- string expectedResultSetString() { return "[{_id:0,a:1}]"; }
- };
-
- /** Sort two documents. */
- class TwoValues : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 2 ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << 1 ) );
- }
- string expectedResultSetString() { return "[{_id:1,a:1},{_id:0,a:2}]"; }
- };
-
- /** Sort spec is not an object. */
- class NonObjectSpec : public Base {
- public:
- void run() {
- BSONObj spec = BSON( "$sort" << 1 );
- BSONElement specElement = spec.firstElement();
- ASSERT_THROWS( DocumentSourceSort::createFromBson( specElement, ctx() ),
- UserException );
- }
- };
-
- /** Sort spec is an empty object. */
- class EmptyObjectSpec : public InvalidSpecBase {
- BSONObj sortSpec() { return BSONObj(); }
- };
-
- /** Sort spec value is not a number. */
- class NonNumberDirectionSpec : public InvalidSpecBase {
- BSONObj sortSpec() { return BSON( "a" << "b" ); }
- };
-
- /** Sort spec value is not a valid number. */
- class InvalidNumberDirectionSpec : public InvalidSpecBase {
- BSONObj sortSpec() { return BSON( "a" << 0 ); }
- };
-
- /** Sort spec with a descending field. */
- class DescendingOrder : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 2 ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << 1 ) );
- }
- string expectedResultSetString() { return "[{_id:0,a:2},{_id:1,a:1}]"; }
- virtual BSONObj sortSpec() { return BSON( "a" << -1 ); }
- };
-
- /** Sort spec with a dotted field. */
- class DottedSortField : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << BSON( "b" << 2 ) ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << BSON( "b" << 1 ) ) );
- }
- string expectedResultSetString() { return "[{_id:1,a:{b:1}},{_id:0,a:{b:2}}]"; }
- virtual BSONObj sortSpec() { return BSON( "a.b" << 1 ); }
- };
-
- /** Sort spec with a compound key. */
- class CompoundSortSpec : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 1 << "b" << 3 ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << 1 << "b" << 2 ) );
- client.insert( ns, BSON( "_id" << 2 << "a" << 0 << "b" << 4 ) );
- }
- string expectedResultSetString() {
- return "[{_id:2,a:0,b:4},{_id:1,a:1,b:2},{_id:0,a:1,b:3}]";
- }
- virtual BSONObj sortSpec() { return BSON( "a" << 1 << "b" << 1 ); }
- };
-
- /** Sort spec with a compound key and descending order. */
- class CompoundSortSpecAlternateOrder : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 1 << "b" << 3 ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << 1 << "b" << 2 ) );
- client.insert( ns, BSON( "_id" << 2 << "a" << 0 << "b" << 4 ) );
- }
- string expectedResultSetString() {
- return "[{_id:1,a:1,b:2},{_id:0,a:1,b:3},{_id:2,a:0,b:4}]";
- }
- virtual BSONObj sortSpec() { return BSON( "a" << -1 << "b" << 1 ); }
- };
-
- /** Sort spec with a compound key and descending order. */
- class CompoundSortSpecAlternateOrderSecondField : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 1 << "b" << 3 ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << 1 << "b" << 2 ) );
- client.insert( ns, BSON( "_id" << 2 << "a" << 0 << "b" << 4 ) );
- }
- string expectedResultSetString() {
- return "[{_id:2,a:0,b:4},{_id:0,a:1,b:3},{_id:1,a:1,b:2}]";
- }
- virtual BSONObj sortSpec() { return BSON( "a" << 1 << "b" << -1 ); }
- };
-
- /** Sorting different types is not supported. */
- class InconsistentTypeSort : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON("_id" << 0 << "a" << 1) );
- client.insert( ns, BSON("_id" << 1 << "a" << "foo") );
- }
- string expectedResultSetString() {
- return "[{_id:0,a:1},{_id:1,a:\"foo\"}]";
- }
- };
+ checkResultSet(sink);
+ }
- /** Sorting different numeric types is supported. */
- class MixedNumericSort : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 2.3 ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << 1 ) );
- }
- string expectedResultSetString() {
- return "[{_id:1,a:1},{_id:0,a:2.3}]";
- }
- };
+protected:
+ virtual void populateData() {}
+ virtual BSONObj groupSpec() {
+ return BSON("_id" << 0);
+ }
+ /** Expected results. Must be sorted by _id to ensure consistent ordering. */
+ virtual BSONObj expectedResultSet() {
+ BSONObj wrappedResult =
+ // fromjson cannot parse an array, so place the array within an object.
+ fromjson(string("{'':") + expectedResultSetString() + "}");
+ return wrappedResult[""].embeddedObject().getOwned();
+ }
+ /** Expected results. Must be sorted by _id to ensure consistent ordering. */
+ virtual string expectedResultSetString() {
+ return "[]";
+ }
+ intrusive_ptr<DocumentSource> createMerger() {
+ // Set up a group merger to simulate merging results in the router. In this
+ // case only one shard is in use.
+ SplittableDocumentSource* splittable = dynamic_cast<SplittableDocumentSource*>(group());
+ ASSERT(splittable);
+ intrusive_ptr<DocumentSource> routerSource = splittable->getMergeSource();
+ ASSERT_NOT_EQUALS(group(), routerSource.get());
+ return routerSource;
+ }
+ void checkResultSet(const intrusive_ptr<DocumentSource>& sink) {
+ // Load the results from the DocumentSourceGroup and sort them by _id.
+ IdMap resultSet;
+ while (boost::optional<Document> current = sink->getNext()) {
+ // Save the current result.
+ Value id = current->getField("_id");
+ resultSet[id] = *current;
+ }
+ // Verify the DocumentSourceGroup is exhausted.
+ assertExhausted(sink);
+
+ // Convert results to BSON once they all have been retrieved (to detect any errors
+ // resulting from incorrectly shared sub objects).
+ BSONArrayBuilder bsonResultSet;
+ for (IdMap::const_iterator i = resultSet.begin(); i != resultSet.end(); ++i) {
+ bsonResultSet << i->second;
+ }
+ // Check the result set.
+ ASSERT_EQUALS(expectedResultSet(), bsonResultSet.arr());
+ }
+};
- /** Ordering of a missing value. */
- class MissingValue : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 1 ) );
- client.insert( ns, BSON( "_id" << 1 ) );
- }
- string expectedResultSetString() {
- return "[{_id:1},{_id:0,a:1}]";
- }
- };
-
- /** Ordering of a null value. */
- class NullValue : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << 1 ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << BSONNULL ) );
- }
- string expectedResultSetString() {
- return "[{_id:1,a:null},{_id:0,a:1}]";
- }
- };
+/** An empty collection generates no results. */
+class EmptyCollection : public CheckResultsBase {};
- /** A missing nested object within an array returns an empty array. */
- class MissingObjectWithinArray : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << BSON_ARRAY( 1 ) ) );
- client.insert( ns, BSON( "_id" << 1 << "a" << BSON_ARRAY( BSON("b" << 1) ) ) );
- }
- string expectedResultSetString() {
- return "[{_id:0,a:[1]},{_id:1,a:[{b:1}]}]";
- }
- BSONObj sortSpec() { return BSON( "a.b" << 1 ); }
- };
-
- /** Compare nested values from within an array. */
- class ExtractArrayValues : public CheckResultsBase {
- void populateData() {
- client.insert( ns, fromjson( "{_id:0,a:[{b:1},{b:2}]}" ) );
- client.insert( ns, fromjson( "{_id:1,a:[{b:1},{b:1}]}" ) );
- }
- string expectedResultSetString() {
- return "[{_id:1,a:[{b:1},{b:1}]},{_id:0,a:[{b:1},{b:2}]}]";
- }
- BSONObj sortSpec() { return BSON( "a.b" << 1 ); }
- };
-
- /** Dependant field paths. */
- class Dependencies : public Base {
- public:
- void run() {
- createSort( BSON( "a" << 1 << "b.c" << -1 ) );
- DepsTracker dependencies;
- ASSERT_EQUALS( DocumentSource::SEE_NEXT, sort()->getDependencies( &dependencies ) );
- ASSERT_EQUALS( 2U, dependencies.fields.size() );
- ASSERT_EQUALS( 1U, dependencies.fields.count( "a" ) );
- ASSERT_EQUALS( 1U, dependencies.fields.count( "b.c" ) );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( false, dependencies.needTextScore );
- }
- };
-
- } // namespace DocumentSourceSort
-
- namespace DocumentSourceUnwind {
-
- using mongo::DocumentSourceUnwind;
-
- class Base : public DocumentSourceCursor::Base {
- protected:
- void createUnwind( const string& unwindFieldPath = "$a" ) {
- BSONObj spec = BSON( "$unwind" << unwindFieldPath );
- BSONElement specElement = spec.firstElement();
- _unwind = DocumentSourceUnwind::createFromBson( specElement, ctx() );
- checkBsonRepresentation( spec );
- _unwind->setSource( source() );
- }
- DocumentSource* unwind() { return _unwind.get(); }
- /** Assert that iterator state accessors consistently report the source is exhausted. */
- void assertExhausted() const {
- ASSERT( !_unwind->getNext() );
- ASSERT( !_unwind->getNext() );
- ASSERT( !_unwind->getNext() );
- }
- private:
- /**
- * Check that the BSON representation generated by the source matches the BSON it was
- * created with.
- */
- void checkBsonRepresentation( const BSONObj& spec ) {
- vector<Value> arr;
- _unwind->serializeToArray(arr);
- BSONObj generatedSpec = Value(arr[0]).getDocument().toBson();
- ASSERT_EQUALS( spec, generatedSpec );
- }
- intrusive_ptr<DocumentSource> _unwind;
- };
-
- class CheckResultsBase : public Base {
- public:
- virtual ~CheckResultsBase() {}
- void run() {
- populateData();
- createSource();
- createUnwind( unwindFieldPath() );
-
- // Load the results from the DocumentSourceUnwind.
- vector<Document> resultSet;
- while (boost::optional<Document> current = unwind()->getNext()) {
- // Get the current result.
- resultSet.push_back(*current);
- }
- // Verify the DocumentSourceUnwind is exhausted.
- assertExhausted();
-
- // Convert results to BSON once they all have been retrieved (to detect any errors
- // resulting from incorrectly shared sub objects).
- BSONArrayBuilder bsonResultSet;
- for( vector<Document>::const_iterator i = resultSet.begin();
- i != resultSet.end(); ++i ) {
- bsonResultSet << *i;
- }
- // Check the result set.
- ASSERT_EQUALS( expectedResultSet(), bsonResultSet.arr() );
- }
- protected:
- virtual void populateData() {}
- virtual BSONObj expectedResultSet() const {
- BSONObj wrappedResult =
- // fromjson cannot parse an array, so place the array within an object.
- fromjson( string( "{'':" ) + expectedResultSetString() + "}" );
- return wrappedResult[ "" ].embeddedObject().getOwned();
- }
- virtual string expectedResultSetString() const { return "[]"; }
- virtual string unwindFieldPath() const { return "$a"; }
- };
-
- class UnexpectedTypeBase : public Base {
- public:
- virtual ~UnexpectedTypeBase() {}
- void run() {
- populateData();
- createSource();
- createUnwind();
- // A UserException is thrown during iteration.
- ASSERT_THROWS( iterateAll(), UserException );
- }
- protected:
- virtual void populateData() {}
- private:
- void iterateAll() {
- while (unwind()->getNext()) {
- // do nothing
- }
- }
- };
+/** A $group performed on a single document. */
+class SingleDocument : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("a" << 1));
+ }
+ virtual BSONObj groupSpec() {
+ return BSON("_id" << 0 << "a" << BSON("$sum"
+ << "$a"));
+ }
+ virtual string expectedResultSetString() {
+ return "[{_id:0,a:1}]";
+ }
+};
- /** An empty collection produces no results. */
- class Empty : public CheckResultsBase {
- };
+/** A $group performed on two values for a single key. */
+class TwoValuesSingleKey : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("a" << 1));
+ client.insert(ns, BSON("a" << 2));
+ }
+ virtual BSONObj groupSpec() {
+ return BSON("_id" << 0 << "a" << BSON("$push"
+ << "$a"));
+ }
+ virtual string expectedResultSetString() {
+ return "[{_id:0,a:[1,2]}]";
+ }
+};
- /** A document without the unwind field produces no results. */
- class MissingField : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSONObj() );
- }
- };
+/** A $group performed on two values with one key each. */
+class TwoValuesTwoKeys : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 1));
+ client.insert(ns, BSON("_id" << 1 << "a" << 2));
+ }
+ virtual BSONObj groupSpec() {
+ return BSON("_id"
+ << "$_id"
+ << "a" << BSON("$push"
+ << "$a"));
+ }
+ virtual string expectedResultSetString() {
+ return "[{_id:0,a:[1]},{_id:1,a:[2]}]";
+ }
+};
+
+/** A $group performed on four values with two grouping keys (two values per key). */
+class FourValuesTwoKeys : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("id" << 0 << "a" << 1));
+ client.insert(ns, BSON("id" << 1 << "a" << 2));
+ client.insert(ns, BSON("id" << 0 << "a" << 3));
+ client.insert(ns, BSON("id" << 1 << "a" << 4));
+ }
+ virtual BSONObj groupSpec() {
+ return BSON("_id"
+ << "$id"
+ << "a" << BSON("$push"
+ << "$a"));
+ }
+ virtual string expectedResultSetString() {
+ return "[{_id:0,a:[1,3]},{_id:1,a:[2,4]}]";
+ }
+};
+
+/** A $group performed on four values with two grouping keys and two accumulators. */
+class FourValuesTwoKeysTwoAccumulators : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("id" << 0 << "a" << 1));
+ client.insert(ns, BSON("id" << 1 << "a" << 2));
+ client.insert(ns, BSON("id" << 0 << "a" << 3));
+ client.insert(ns, BSON("id" << 1 << "a" << 4));
+ }
+ virtual BSONObj groupSpec() {
+ return BSON("_id"
+ << "$id"
+ << "list" << BSON("$push"
+ << "$a") << "sum"
+ << BSON("$sum" << BSON("$divide" << BSON_ARRAY("$a" << 2))));
+ }
+ virtual string expectedResultSetString() {
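+        // Per key, 'list' collects the raw values and 'sum' adds a/2: (1+3)/2 = 2 and (2+4)/2 = 3.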
+ return "[{_id:0,list:[1,3],sum:2},{_id:1,list:[2,4],sum:3}]";
+ }
+};
- /** A document with a null field produces no results. */
- class NullField : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "a" << BSONNULL ) );
- }
- };
+/** Null and undefined _id values are grouped together. */
+class GroupNullUndefinedIds : public CheckResultsBase {
+ void populateData() {
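+        // One document stores an explicit null 'a'; the other omits 'a' entirely.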
+ client.insert(ns, BSON("a" << BSONNULL << "b" << 100));
+ client.insert(ns, BSON("b" << 10));
+ }
+ virtual BSONObj groupSpec() {
+ return BSON("_id"
+ << "$a"
+ << "sum" << BSON("$sum"
+ << "$b"));
+ }
+ virtual string expectedResultSetString() {
+ return "[{_id:null,sum:110}]";
+ }
+};
+
+/** A complex _id expression. */
+class ComplexId : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns,
+ BSON("a"
+ << "de"
+ << "b"
+ << "ad"
+ << "c"
+ << "beef"
+ << "d"
+ << ""));
+ client.insert(ns,
+ BSON("a"
+ << "d"
+ << "b"
+ << "eadbe"
+ << "c"
+ << ""
+ << "d"
+ << "ef"));
+ }
+ virtual BSONObj groupSpec() {
+ return BSON("_id" << BSON("$concat" << BSON_ARRAY("$a"
+ << "$b"
+ << "$c"
+ << "$d")));
+ }
+ virtual string expectedResultSetString() {
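+        // 'de'+'ad'+'beef'+'' and 'd'+'eadbe'+''+'ef' both concatenate to 'deadbeef',
+        // so the two documents collapse into a single group.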
+ return "[{_id:'deadbeef'}]";
+ }
+};
- /** A document with an empty array produces no results. */
- class EmptyArray : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "a" << BSONArray() ) );
- }
- };
+/** A missing accumulator input surfaces as null in the result. */
+class UndefinedAccumulatorValue : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSONObj());
+ }
+ virtual BSONObj groupSpec() {
+ return BSON("_id" << 0 << "first" << BSON("$first"
+ << "$missing"));
+ }
+ virtual string expectedResultSetString() {
+ return "[{_id:0, first:null}]";
+ }
+};
+
+/** Simulate merging sharded results in the router. */
+class RouterMerger : public CheckResultsBase {
+public:
+ void run() {
+ BSONObj sourceData = fromjson(
+ "{'':[{_id:0,list:[1,2]},{_id:1,list:[3,4]}" // from shard 1
+ ",{_id:0,list:[10,20]},{_id:1,list:[30,40]}]}" // from shard 2
+ );
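+        // Each synthetic shard emits partial groups for keys 0 and 1; the merger must
+        // recombine the partial lists per key.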
+ BSONElement sourceDataElement = sourceData.firstElement();
+ // Create a source with synthetic data.
+ intrusive_ptr<DocumentSourceBsonArray> source =
+ DocumentSourceBsonArray::create(sourceDataElement.Obj(), ctx());
+ // Create a group source.
+ createGroup(BSON("_id"
+ << "$x"
+ << "list" << BSON("$push"
+ << "$y")));
+ // Create a merger version of the source.
+ intrusive_ptr<DocumentSource> group = createMerger();
+ // Attach the merger to the synthetic shard results.
+ group->setSource(source.get());
+ // Check the merger's output.
+ checkResultSet(group);
+ }
- /** A document with a number field produces a UserException. */
- class UnexpectedNumber : public UnexpectedTypeBase {
- void populateData() {
- client.insert( ns, BSON( "a" << 1 ) );
- }
- };
+private:
+ string expectedResultSetString() {
+ return "[{_id:0,list:[1,2,10,20]},{_id:1,list:[3,4,30,40]}]";
+ }
+};
+
+/** Dependent field paths. */
+class Dependencies : public Base {
+public:
+ void run() {
+ createGroup(fromjson("{_id:'$x',a:{$sum:'$y.z'},b:{$avg:{$add:['$u','$v']}}}"));
+ DepsTracker dependencies;
+ ASSERT_EQUALS(DocumentSource::EXHAUSTIVE_ALL, group()->getDependencies(&dependencies));
+ ASSERT_EQUALS(4U, dependencies.fields.size());
+ // Dependency from _id expression.
+ ASSERT_EQUALS(1U, dependencies.fields.count("x"));
+ // Dependencies from accumulator expressions.
+ ASSERT_EQUALS(1U, dependencies.fields.count("y.z"));
+ ASSERT_EQUALS(1U, dependencies.fields.count("u"));
+ ASSERT_EQUALS(1U, dependencies.fields.count("v"));
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(false, dependencies.needTextScore);
+ }
+};
- /** An additional document with a number field produces a UserException. */
- class LaterUnexpectedNumber : public UnexpectedTypeBase {
- void populateData() {
- client.insert( ns, BSON( "a" << BSON_ARRAY( 1 ) ) );
- client.insert( ns, BSON( "a" << 1 ) );
- }
- };
+/**
+ * A string constant (not a field path) as an _id expression and passed to an accumulator.
+ * SERVER-6766
+ */
+class StringConstantIdAndAccumulatorExpressions : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSONObj());
+ }
+ BSONObj groupSpec() {
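+        // Wrapping the values in $const keeps the '$'-prefixed strings from being
+        // parsed as field paths.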
+ return fromjson("{_id:{$const:'$_id...'},a:{$push:{$const:'$a...'}}}");
+ }
+ string expectedResultSetString() {
+ return "[{_id:'$_id...',a:['$a...']}]";
+ }
+};
+
+/** An array constant passed to an accumulator. */
+class ArrayConstantAccumulatorExpression : public CheckResultsBase {
+public:
+ void run() {
+ // A parse exception is thrown when a raw array is provided to an accumulator.
+ ASSERT_THROWS(createGroup(fromjson("{_id:1,a:{$push:[4,5,6]}}")), UserException);
+ // Run standard base tests.
+ CheckResultsBase::run();
+ }
+ void populateData() {
+ client.insert(ns, BSONObj());
+ }
+ BSONObj groupSpec() {
+ // An array can be specified using $const.
+ return fromjson("{_id:[1,2,3],a:{$push:{$const:[4,5,6]}}}");
+ }
+ string expectedResultSetString() {
+ return "[{_id:[1,2,3],a:[[4,5,6]]}]";
+ }
+};
- /** A document with a string field produces a UserException. */
- class UnexpectedString : public UnexpectedTypeBase {
- void populateData() {
- client.insert( ns, BSON( "a" << "foo" ) );
- }
- };
+} // namespace DocumentSourceGroup
- /** A document with an object field produces a UserException. */
- class UnexpectedObject : public UnexpectedTypeBase {
- void populateData() {
- client.insert( ns, BSON( "a" << BSONObj() ) );
- }
- };
+namespace DocumentSourceProject {
- /** Unwind an array with one value. */
- class UnwindOneValue : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << BSON_ARRAY( 1 ) ) );
- }
- string expectedResultSetString() const { return "[{_id:0,a:1}]"; }
- };
+using mongo::DocumentSourceProject;
- /** Unwind an array with two values. */
- class UnwindTwoValues : public CheckResultsBase {
- void populateData() {
- client.insert( ns, BSON( "_id" << 0 << "a" << BSON_ARRAY( 1 << 2 ) ) );
- }
- string expectedResultSetString() const { return "[{_id:0,a:1},{_id:0,a:2}]"; }
- };
+class Base : public DocumentSourceCursor::Base {
+protected:
+ void createProject(const BSONObj& projection = BSON("a" << true)) {
+ BSONObj spec = BSON("$project" << projection);
+ BSONElement specElement = spec.firstElement();
+ _project = DocumentSourceProject::createFromBson(specElement, ctx());
+ checkBsonRepresentation(spec);
+ _project->setSource(source());
+ }
+ DocumentSource* project() {
+ return _project.get();
+ }
+ /** Assert that iterator state accessors consistently report the source is exhausted. */
+ void assertExhausted() const {
+ ASSERT(!_project->getNext());
+ ASSERT(!_project->getNext());
+ ASSERT(!_project->getNext());
+ }
+    /**
+     * Check that the BSON representation generated by the source matches the BSON it was
+     * created with.
+     */
+ void checkBsonRepresentation(const BSONObj& spec) {
+ vector<Value> arr;
+ _project->serializeToArray(arr);
+ BSONObj generatedSpec = arr[0].getDocument().toBson();
+ ASSERT_EQUALS(spec, generatedSpec);
+ }
- /** Unwind an array with two values, one of which is null. */
- class UnwindNull : public CheckResultsBase {
- void populateData() {
- client.insert( ns, fromjson( "{_id:0,a:[1,null]}" ) );
- }
- string expectedResultSetString() const { return "[{_id:0,a:1},{_id:0,a:null}]"; }
- };
-
- /** Unwind two documents with arrays. */
- class TwoDocuments : public CheckResultsBase {
- void populateData() {
- client.insert( ns, fromjson( "{_id:0,a:[1,2]}" ) );
- client.insert( ns, fromjson( "{_id:1,a:[3,4]}" ) );
- }
- string expectedResultSetString() const {
- return "[{_id:0,a:1},{_id:0,a:2},{_id:1,a:3},{_id:1,a:4}]";
- }
- };
+private:
+ intrusive_ptr<DocumentSource> _project;
+};
+
+/** The 'a' and 'c.d' fields are included, but the 'b' field is not. */
+class Inclusion : public Base {
+public:
+ void run() {
+ client.insert(ns, fromjson("{_id:0,a:1,b:1,c:{d:1}}"));
+ createSource();
+ createProject(BSON("a" << true << "c" << BSON("d" << true)));
+ // The first result exists and is as expected.
+ boost::optional<Document> next = project()->getNext();
+ ASSERT(bool(next));
+ ASSERT_EQUALS(1, next->getField("a").getInt());
+ ASSERT(next->getField("b").missing());
+ // The _id field is included by default in the root document.
+ ASSERT_EQUALS(0, next->getField("_id").getInt());
+ // The nested c.d inclusion.
+ ASSERT_EQUALS(1, (*next)["c"]["d"].getInt());
+ }
+};
+
+/** Optimize the projection. */
+class Optimize : public Base {
+public:
+ void run() {
+ createProject(BSON("a" << BSON("$and" << BSON_ARRAY(BSON("$const" << true)))));
+ project()->optimize();
+ // Optimizing the DocumentSourceProject optimizes the Expressions that comprise it,
+ // in this case replacing an expression depending on constants with a constant.
+ checkBsonRepresentation(fromjson("{$project:{a:{$const:true}}}"));
+ }
+};
+
+/** Projection spec is not an object. */
+class NonObjectSpec : public Base {
+public:
+ void run() {
+ BSONObj spec = BSON("$project"
+ << "foo");
+ BSONElement specElement = spec.firstElement();
+ ASSERT_THROWS(DocumentSourceProject::createFromBson(specElement, ctx()), UserException);
+ }
+};
- /** Unwind an array in a nested document. */
- class NestedArray : public CheckResultsBase {
- void populateData() {
- client.insert( ns, fromjson( "{_id:0,a:{b:[1,2],c:3}}" ) );
- }
- string expectedResultSetString() const {
- return "[{_id:0,a:{b:1,c:3}},{_id:0,a:{b:2,c:3}}]";
- }
- string unwindFieldPath() const { return "$a.b"; }
- };
+/** Projection spec is an empty object. */
+class EmptyObjectSpec : public Base {
+public:
+ void run() {
+ ASSERT_THROWS(createProject(BSONObj()), UserException);
+ }
+};
- /** A missing array (that cannot be nested below a non object field) produces no results. */
- class NonObjectParent : public CheckResultsBase {
- void populateData() {
- client.insert( ns, fromjson( "{_id:0,a:4}" ) );
- }
- string unwindFieldPath() const { return "$a.b"; }
- };
+/** Projection spec contains a top level dollar sign. */
+class TopLevelDollar : public Base {
+public:
+ void run() {
+ ASSERT_THROWS(createProject(BSON("$add" << BSONArray())), UserException);
+ }
+};
- /** Unwind an array in a doubly nested document. */
- class DoubleNestedArray : public CheckResultsBase {
- void populateData() {
- client.insert( ns, fromjson( "{_id:0,a:{b:{d:[1,2],e:4},c:3}}" ) );
- }
- string expectedResultSetString() const {
- return "[{_id:0,a:{b:{d:1,e:4},c:3}},{_id:0,a:{b:{d:2,e:4},c:3}}]";
- }
- string unwindFieldPath() const { return "$a.b.d"; }
- };
-
- /** Unwind several documents in a row. */
- class SeveralDocuments : public CheckResultsBase {
- void populateData() {
- client.insert( ns, fromjson( "{_id:0,a:[1,2,3]}" ) );
- client.insert( ns, fromjson( "{_id:1}" ) );
- client.insert( ns, fromjson( "{_id:2}" ) );
- client.insert( ns, fromjson( "{_id:3,a:[10,20]}" ) );
- client.insert( ns, fromjson( "{_id:4,a:[30]}" ) );
- }
- string expectedResultSetString() const {
- return "[{_id:0,a:1},{_id:0,a:2},{_id:0,a:3},{_id:3,a:10},"
- "{_id:3,a:20},{_id:4,a:30}]";
- }
- };
-
- /** Unwind several more documents in a row. */
- class SeveralMoreDocuments : public CheckResultsBase {
- void populateData() {
- client.insert( ns, fromjson( "{_id:0,a:null}" ) );
- client.insert( ns, fromjson( "{_id:1}" ) );
- client.insert( ns, fromjson( "{_id:2,a:['a','b']}" ) );
- client.insert( ns, fromjson( "{_id:3}" ) );
- client.insert( ns, fromjson( "{_id:4,a:[1,2,3]}" ) );
- client.insert( ns, fromjson( "{_id:5,a:[4,5,6]}" ) );
- client.insert( ns, fromjson( "{_id:6,a:[7,8,9]}" ) );
- client.insert( ns, fromjson( "{_id:7,a:[]}" ) );
- }
- string expectedResultSetString() const {
- return "[{_id:2,a:'a'},{_id:2,a:'b'},{_id:4,a:1},{_id:4,a:2},"
- "{_id:4,a:3},{_id:5,a:4},{_id:5,a:5},{_id:5,a:6},"
- "{_id:6,a:7},{_id:6,a:8},{_id:6,a:9}]";
- }
- };
-
- /** Dependant field paths. */
- class Dependencies : public Base {
- public:
- void run() {
- createUnwind( "$x.y.z" );
- DepsTracker dependencies;
- ASSERT_EQUALS( DocumentSource::SEE_NEXT,
- unwind()->getDependencies( &dependencies ) );
- ASSERT_EQUALS( 1U, dependencies.fields.size() );
- ASSERT_EQUALS( 1U, dependencies.fields.count( "x.y.z" ) );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( false, dependencies.needTextScore );
- }
- };
+/** Projection spec is invalid. */
+class InvalidSpec : public Base {
+public:
+ void run() {
+ ASSERT_THROWS(createProject(BSON("a" << BSON("$invalidOperator" << 1))), UserException);
+ }
+};
+
+/** Two documents are projected. */
+class TwoDocuments : public Base {
+public:
+ void run() {
+ client.insert(ns, BSON("a" << 1 << "b" << 2));
+ client.insert(ns, BSON("a" << 3 << "b" << 4));
+ createSource();
+ createProject();
+ boost::optional<Document> next = project()->getNext();
+ ASSERT(bool(next));
+ ASSERT_EQUALS(1, next->getField("a").getInt());
+ ASSERT(next->getField("b").missing());
+
+ next = project()->getNext();
+ ASSERT(bool(next));
+ ASSERT_EQUALS(3, next->getField("a").getInt());
+ ASSERT(next->getField("b").missing());
+
+ assertExhausted();
+ }
+};
+
+/** List of dependent field paths. */
+class Dependencies : public Base {
+public:
+ void run() {
+ createProject(fromjson("{a:true,x:'$b',y:{$and:['$c','$d']}, z: {$meta:'textScore'}}"));
+ DepsTracker dependencies;
+ ASSERT_EQUALS(DocumentSource::EXHAUSTIVE_FIELDS, project()->getDependencies(&dependencies));
+ ASSERT_EQUALS(5U, dependencies.fields.size());
+ // Implicit _id dependency.
+ ASSERT_EQUALS(1U, dependencies.fields.count("_id"));
+ // Inclusion dependency.
+ ASSERT_EQUALS(1U, dependencies.fields.count("a"));
+ // Field path expression dependency.
+ ASSERT_EQUALS(1U, dependencies.fields.count("b"));
+ // Nested expression dependencies.
+ ASSERT_EQUALS(1U, dependencies.fields.count("c"));
+ ASSERT_EQUALS(1U, dependencies.fields.count("d"));
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(true, dependencies.needTextScore);
+ }
+};
- } // namespace DocumentSourceUnwind
+} // namespace DocumentSourceProject
- namespace DocumentSourceGeoNear {
- using mongo::DocumentSourceGeoNear;
- using mongo::DocumentSourceLimit;
+namespace DocumentSourceSort {
- class LimitCoalesce : public DocumentSourceCursor::Base {
- public:
- void run() {
- intrusive_ptr<DocumentSourceGeoNear> geoNear = DocumentSourceGeoNear::create(ctx());
+using mongo::DocumentSourceSort;
- ASSERT_EQUALS(geoNear->getLimit(), 100);
+class Base : public DocumentSourceCursor::Base {
+protected:
+ void createSort(const BSONObj& sortKey = BSON("a" << 1)) {
+ BSONObj spec = BSON("$sort" << sortKey);
+ BSONElement specElement = spec.firstElement();
+ _sort = DocumentSourceSort::createFromBson(specElement, ctx());
+ checkBsonRepresentation(spec);
+ _sort->setSource(source());
+ }
+ DocumentSourceSort* sort() {
+ return dynamic_cast<DocumentSourceSort*>(_sort.get());
+ }
+ /** Assert that iterator state accessors consistently report the source is exhausted. */
+ void assertExhausted() const {
+ ASSERT(!_sort->getNext());
+ ASSERT(!_sort->getNext());
+ ASSERT(!_sort->getNext());
+ }
- ASSERT(geoNear->coalesce(DocumentSourceLimit::create(ctx(), 200)));
- ASSERT_EQUALS(geoNear->getLimit(), 100);
+private:
+    /**
+     * Check that the BSON representation generated by the source matches the BSON it was
+     * created with.
+     */
+ void checkBsonRepresentation(const BSONObj& spec) {
+ vector<Value> arr;
+ _sort->serializeToArray(arr);
+ BSONObj generatedSpec = arr[0].getDocument().toBson();
+ ASSERT_EQUALS(spec, generatedSpec);
+ }
+ intrusive_ptr<DocumentSource> _sort;
+};
+
+class SortWithLimit : public Base {
+public:
+ void run() {
+ createSort(BSON("a" << 1));
+ ASSERT_EQUALS(sort()->getLimit(), -1);
+
+ { // pre-limit checks
+ vector<Value> arr;
+ sort()->serializeToArray(arr);
+ ASSERT_EQUALS(arr[0].getDocument().toBson(), BSON("$sort" << BSON("a" << 1)));
+
+ ASSERT(sort()->getShardSource() == NULL);
+ ASSERT(sort()->getMergeSource() != NULL);
+ }
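+
+        // Coalescing $limit stages into the $sort makes it a top-k sort: the first
+        // limit is adopted, a larger one is ignored, and a smaller one tightens it.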
- ASSERT(geoNear->coalesce(DocumentSourceLimit::create(ctx(), 50)));
- ASSERT_EQUALS(geoNear->getLimit(), 50);
+ ASSERT_TRUE(sort()->coalesce(mkLimit(10)));
+ ASSERT_EQUALS(sort()->getLimit(), 10);
+ ASSERT_TRUE(sort()->coalesce(mkLimit(15)));
+ ASSERT_EQUALS(sort()->getLimit(), 10); // unchanged
+ ASSERT_TRUE(sort()->coalesce(mkLimit(5)));
+ ASSERT_EQUALS(sort()->getLimit(), 5); // reduced
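+
+        // With a limit attached, the stage serializes as {$sort} followed by {$limit}
+        // and now reports a shard-side component for sharded execution.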
- ASSERT(geoNear->coalesce(DocumentSourceLimit::create(ctx(), 30)));
- ASSERT_EQUALS(geoNear->getLimit(), 30);
- }
- };
- } // namespace DocumentSourceGeoNear
+ vector<Value> arr;
+ sort()->serializeToArray(arr);
+ ASSERT_EQUALS(
+ Value(arr),
+ DOC_ARRAY(DOC("$sort" << DOC("a" << 1)) << DOC("$limit" << sort()->getLimit())));
- namespace DocumentSourceMatch {
- using mongo::DocumentSourceMatch;
+ ASSERT(sort()->getShardSource() != NULL);
+ ASSERT(sort()->getMergeSource() != NULL);
+ }
- // Helpers to make a DocumentSourceMatch from a query object or json string
- intrusive_ptr<DocumentSourceMatch> makeMatch(const BSONObj& query) {
- intrusive_ptr<DocumentSource> uncasted =
- DocumentSourceMatch::createFromBson(BSON("$match" << query).firstElement(), NULL);
- return dynamic_cast<DocumentSourceMatch*>(uncasted.get());
+ intrusive_ptr<DocumentSource> mkLimit(int limit) {
+ BSONObj obj = BSON("$limit" << limit);
+ BSONElement e = obj.firstElement();
+ return mongo::DocumentSourceLimit::createFromBson(e, ctx());
+ }
+};
+
+class CheckResultsBase : public Base {
+public:
+ virtual ~CheckResultsBase() {}
+ void run() {
+ populateData();
+ createSource();
+ createSort(sortSpec());
+
+        // Load the results from the DocumentSourceSort.
+ vector<Document> resultSet;
+ while (boost::optional<Document> current = sort()->getNext()) {
+ // Get the current result.
+ resultSet.push_back(*current);
}
- intrusive_ptr<DocumentSourceMatch> makeMatch(const string& queryJson) {
- return makeMatch(fromjson(queryJson));
+        // Verify the DocumentSourceSort is exhausted.
+ assertExhausted();
+
+ // Convert results to BSON once they all have been retrieved (to detect any errors
+ // resulting from incorrectly shared sub objects).
+ BSONArrayBuilder bsonResultSet;
+ for (vector<Document>::const_iterator i = resultSet.begin(); i != resultSet.end(); ++i) {
+ bsonResultSet << *i;
}
+ // Check the result set.
+ ASSERT_EQUALS(expectedResultSet(), bsonResultSet.arr());
+ }
- class RedactSafePortion {
- public:
- void test(string input, string safePortion) {
- try {
- intrusive_ptr<DocumentSourceMatch> match = makeMatch(input);
- ASSERT_EQUALS(match->redactSafePortion(), fromjson(safePortion));
- } catch(...) {
- unittest::log() << "Problem with redactSafePortion() of: " << input;
- throw;
- }
- }
+protected:
+ virtual void populateData() {}
+ virtual BSONObj expectedResultSet() {
+ BSONObj wrappedResult =
+ // fromjson cannot parse an array, so place the array within an object.
+ fromjson(string("{'':") + expectedResultSetString() + "}");
+ return wrappedResult[""].embeddedObject().getOwned();
+ }
+ virtual string expectedResultSetString() {
+ return "[]";
+ }
+ virtual BSONObj sortSpec() {
+ return BSON("a" << 1);
+ }
+};
+
+class InvalidSpecBase : public Base {
+public:
+ virtual ~InvalidSpecBase() {}
+ void run() {
+ ASSERT_THROWS(createSort(sortSpec()), UserException);
+ }
+
+protected:
+ virtual BSONObj sortSpec() = 0;
+};
+
+class InvalidOperationBase : public Base {
+public:
+ virtual ~InvalidOperationBase() {}
+ void run() {
+ populateData();
+ createSource();
+ createSort(sortSpec());
+ ASSERT_THROWS(exhaust(), UserException);
+ }
+
+protected:
+ virtual void populateData() = 0;
+ virtual BSONObj sortSpec() {
+ return BSON("a" << 1);
+ }
+
+private:
+ void exhaust() {
+ while (sort()->getNext()) {
+ // do nothing
+ }
+ }
+};
+
+/** No documents in source. */
+class Empty : public CheckResultsBase {};
+
+/** Sort a single document. */
+class SingleValue : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 1));
+ }
+ string expectedResultSetString() {
+ return "[{_id:0,a:1}]";
+ }
+};
+
+/** Sort two documents. */
+class TwoValues : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 2));
+ client.insert(ns, BSON("_id" << 1 << "a" << 1));
+ }
+ string expectedResultSetString() {
+ return "[{_id:1,a:1},{_id:0,a:2}]";
+ }
+};
+
+/** Sort spec is not an object. */
+class NonObjectSpec : public Base {
+public:
+ void run() {
+ BSONObj spec = BSON("$sort" << 1);
+ BSONElement specElement = spec.firstElement();
+ ASSERT_THROWS(DocumentSourceSort::createFromBson(specElement, ctx()), UserException);
+ }
+};
+
+/** Sort spec is an empty object. */
+class EmptyObjectSpec : public InvalidSpecBase {
+ BSONObj sortSpec() {
+ return BSONObj();
+ }
+};
+
+/** Sort spec value is not a number. */
+class NonNumberDirectionSpec : public InvalidSpecBase {
+ BSONObj sortSpec() {
+ return BSON("a"
+ << "b");
+ }
+};
+
+/** Sort spec value is a number that is not a valid sort direction. */
+class InvalidNumberDirectionSpec : public InvalidSpecBase {
+ BSONObj sortSpec() {
+ return BSON("a" << 0);
+ }
+};
+
+/** Sort spec with a descending field. */
+class DescendingOrder : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 2));
+ client.insert(ns, BSON("_id" << 1 << "a" << 1));
+ }
+ string expectedResultSetString() {
+ return "[{_id:0,a:2},{_id:1,a:1}]";
+ }
+ virtual BSONObj sortSpec() {
+ return BSON("a" << -1);
+ }
+};
+
+/** Sort spec with a dotted field. */
+class DottedSortField : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << BSON("b" << 2)));
+ client.insert(ns, BSON("_id" << 1 << "a" << BSON("b" << 1)));
+ }
+ string expectedResultSetString() {
+ return "[{_id:1,a:{b:1}},{_id:0,a:{b:2}}]";
+ }
+ virtual BSONObj sortSpec() {
+ return BSON("a.b" << 1);
+ }
+};
+
+/** Sort spec with a compound key. */
+class CompoundSortSpec : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 1 << "b" << 3));
+ client.insert(ns, BSON("_id" << 1 << "a" << 1 << "b" << 2));
+ client.insert(ns, BSON("_id" << 2 << "a" << 0 << "b" << 4));
+ }
+ string expectedResultSetString() {
+ return "[{_id:2,a:0,b:4},{_id:1,a:1,b:2},{_id:0,a:1,b:3}]";
+ }
+ virtual BSONObj sortSpec() {
+ return BSON("a" << 1 << "b" << 1);
+ }
+};
+
+/** Sort spec with a compound key and descending order. */
+class CompoundSortSpecAlternateOrder : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 1 << "b" << 3));
+ client.insert(ns, BSON("_id" << 1 << "a" << 1 << "b" << 2));
+ client.insert(ns, BSON("_id" << 2 << "a" << 0 << "b" << 4));
+ }
+ string expectedResultSetString() {
+ return "[{_id:1,a:1,b:2},{_id:0,a:1,b:3},{_id:2,a:0,b:4}]";
+ }
+ virtual BSONObj sortSpec() {
+ return BSON("a" << -1 << "b" << 1);
+ }
+};
+
+/** Sort spec with a compound key, descending on the second field. */
+class CompoundSortSpecAlternateOrderSecondField : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 1 << "b" << 3));
+ client.insert(ns, BSON("_id" << 1 << "a" << 1 << "b" << 2));
+ client.insert(ns, BSON("_id" << 2 << "a" << 0 << "b" << 4));
+ }
+ string expectedResultSetString() {
+ return "[{_id:2,a:0,b:4},{_id:0,a:1,b:3},{_id:1,a:1,b:2}]";
+ }
+ virtual BSONObj sortSpec() {
+ return BSON("a" << 1 << "b" << -1);
+ }
+};
+
+/** Sorting mixed types orders them by type (numbers sort before strings). */
+class InconsistentTypeSort : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 1));
+ client.insert(ns,
+ BSON("_id" << 1 << "a"
+ << "foo"));
+ }
+ string expectedResultSetString() {
+ return "[{_id:0,a:1},{_id:1,a:\"foo\"}]";
+ }
+};
- void run() {
- // Empty
- test("{}",
- "{}");
+/** Sorting different numeric types is supported. */
+class MixedNumericSort : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 2.3));
+ client.insert(ns, BSON("_id" << 1 << "a" << 1));
+ }
+ string expectedResultSetString() {
+ return "[{_id:1,a:1},{_id:0,a:2.3}]";
+ }
+};
- // Basic allowed things
- test("{a:1}",
- "{a:1}");
+/** Ordering of a missing value. */
+class MissingValue : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 1));
+ client.insert(ns, BSON("_id" << 1));
+ }
+ string expectedResultSetString() {
+ return "[{_id:1},{_id:0,a:1}]";
+ }
+};
- test("{a:'asdf'}",
- "{a:'asdf'}");
+/** Ordering of a null value. */
+class NullValue : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << 1));
+ client.insert(ns, BSON("_id" << 1 << "a" << BSONNULL));
+ }
+ string expectedResultSetString() {
+ return "[{_id:1,a:null},{_id:0,a:1}]";
+ }
+};
- test("{a:/asdf/i}",
- "{a:/asdf/i}");
+/** A nested field missing from an array's elements yields an empty array as the sort key. */
+class MissingObjectWithinArray : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << BSON_ARRAY(1)));
+ client.insert(ns, BSON("_id" << 1 << "a" << BSON_ARRAY(BSON("b" << 1))));
+ }
+ string expectedResultSetString() {
+ return "[{_id:0,a:[1]},{_id:1,a:[{b:1}]}]";
+ }
+ BSONObj sortSpec() {
+ return BSON("a.b" << 1);
+ }
+};
- test("{a: {$regex: 'adsf'}}",
- "{a: {$regex: 'adsf'}}");
+/** Compare nested values from within an array. */
+class ExtractArrayValues : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, fromjson("{_id:0,a:[{b:1},{b:2}]}"));
+ client.insert(ns, fromjson("{_id:1,a:[{b:1},{b:1}]}"));
+ }
+ string expectedResultSetString() {
+ return "[{_id:1,a:[{b:1},{b:1}]},{_id:0,a:[{b:1},{b:2}]}]";
+ }
+ BSONObj sortSpec() {
+ return BSON("a.b" << 1);
+ }
+};
+
+/** Dependent field paths. */
+class Dependencies : public Base {
+public:
+ void run() {
+ createSort(BSON("a" << 1 << "b.c" << -1));
+ DepsTracker dependencies;
+ ASSERT_EQUALS(DocumentSource::SEE_NEXT, sort()->getDependencies(&dependencies));
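+        // SEE_NEXT: the sort's key fields are recorded, but later stages still
+        // determine the full dependency set.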
+ ASSERT_EQUALS(2U, dependencies.fields.size());
+ ASSERT_EQUALS(1U, dependencies.fields.count("a"));
+ ASSERT_EQUALS(1U, dependencies.fields.count("b.c"));
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(false, dependencies.needTextScore);
+ }
+};
- test("{a: {$regex: 'adsf', $options: 'i'}}",
- "{a: {$regex: 'adsf', $options: 'i'}}");
+} // namespace DocumentSourceSort
- test("{a: {$mod: [1, 0]}}",
- "{a: {$mod: [1, 0]}}");
+namespace DocumentSourceUnwind {
- test("{a: {$type: 1}}",
- "{a: {$type: 1}}");
+using mongo::DocumentSourceUnwind;
- // Basic disallowed things
- test("{a: null}",
- "{}");
+class Base : public DocumentSourceCursor::Base {
+protected:
+ void createUnwind(const string& unwindFieldPath = "$a") {
+ BSONObj spec = BSON("$unwind" << unwindFieldPath);
+ BSONElement specElement = spec.firstElement();
+ _unwind = DocumentSourceUnwind::createFromBson(specElement, ctx());
+ checkBsonRepresentation(spec);
+ _unwind->setSource(source());
+ }
+ DocumentSource* unwind() {
+ return _unwind.get();
+ }
+ /** Assert that iterator state accessors consistently report the source is exhausted. */
+ void assertExhausted() const {
+ ASSERT(!_unwind->getNext());
+ ASSERT(!_unwind->getNext());
+ ASSERT(!_unwind->getNext());
+ }
- test("{a: {}}",
- "{}");
+private:
+ /**
+ * Check that the BSON representation generated by the source matches the BSON it was
+ * created with.
+ */
+ void checkBsonRepresentation(const BSONObj& spec) {
+ vector<Value> arr;
+ _unwind->serializeToArray(arr);
+ BSONObj generatedSpec = Value(arr[0]).getDocument().toBson();
+ ASSERT_EQUALS(spec, generatedSpec);
+ }
+ intrusive_ptr<DocumentSource> _unwind;
+};
+
+class CheckResultsBase : public Base {
+public:
+ virtual ~CheckResultsBase() {}
+ void run() {
+ populateData();
+ createSource();
+ createUnwind(unwindFieldPath());
+
+ // Load the results from the DocumentSourceUnwind.
+ vector<Document> resultSet;
+ while (boost::optional<Document> current = unwind()->getNext()) {
+ // Get the current result.
+ resultSet.push_back(*current);
+ }
+ // Verify the DocumentSourceUnwind is exhausted.
+ assertExhausted();
+
+ // Convert results to BSON once they all have been retrieved (to detect any errors
+ // resulting from incorrectly shared sub objects).
+ BSONArrayBuilder bsonResultSet;
+ for (vector<Document>::const_iterator i = resultSet.begin(); i != resultSet.end(); ++i) {
+ bsonResultSet << *i;
+ }
+ // Check the result set.
+ ASSERT_EQUALS(expectedResultSet(), bsonResultSet.arr());
+ }
- test("{a: []}",
- "{}");
+protected:
+ virtual void populateData() {}
+ virtual BSONObj expectedResultSet() const {
+ BSONObj wrappedResult =
+ // fromjson cannot parse an array, so place the array within an object.
+ fromjson(string("{'':") + expectedResultSetString() + "}");
+ return wrappedResult[""].embeddedObject().getOwned();
+ }
+ virtual string expectedResultSetString() const {
+ return "[]";
+ }
+ virtual string unwindFieldPath() const {
+ return "$a";
+ }
+};
+
+class UnexpectedTypeBase : public Base {
+public:
+ virtual ~UnexpectedTypeBase() {}
+ void run() {
+ populateData();
+ createSource();
+ createUnwind();
+ // A UserException is thrown during iteration.
+ ASSERT_THROWS(iterateAll(), UserException);
+ }
- test("{'a.0': 1}",
- "{}");
+protected:
+ virtual void populateData() {}
- test("{'a.0.b': 1}",
- "{}");
+private:
+ void iterateAll() {
+ while (unwind()->getNext()) {
+ // do nothing
+ }
+ }
+};
- test("{a: {$ne: 1}}",
- "{}");
+/** An empty collection produces no results. */
+class Empty : public CheckResultsBase {};
- test("{a: {$nin: [1, 2, 3]}}",
- "{}");
+/** A document without the unwind field produces no results. */
+class MissingField : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSONObj());
+ }
+};
- test("{a: {$exists: true}}", // could be allowed but currently isn't
- "{}");
+/** A document with a null field produces no results. */
+class NullField : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("a" << BSONNULL));
+ }
+};
- test("{a: {$exists: false}}", // can never be allowed
- "{}");
+/** A document with an empty array produces no results. */
+class EmptyArray : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("a" << BSONArray()));
+ }
+};
- test("{a: {$size: 1}}",
- "{}");
+/** A document with a number field produces a UserException. */
+class UnexpectedNumber : public UnexpectedTypeBase {
+ void populateData() {
+ client.insert(ns, BSON("a" << 1));
+ }
+};
- test("{$nor: [{a:1}]}",
- "{}");
+/** An additional document with a number field produces a UserException. */
+class LaterUnexpectedNumber : public UnexpectedTypeBase {
+ void populateData() {
+ client.insert(ns, BSON("a" << BSON_ARRAY(1)));
+ client.insert(ns, BSON("a" << 1));
+ }
+};
+
+/** A document with a string field produces a UserException. */
+class UnexpectedString : public UnexpectedTypeBase {
+ void populateData() {
+ client.insert(ns,
+ BSON("a"
+ << "foo"));
+ }
+};
- // Combinations
- test("{a:1, b: 'asdf'}",
- "{a:1, b: 'asdf'}");
+/** A document with an object field produces a UserException. */
+class UnexpectedObject : public UnexpectedTypeBase {
+ void populateData() {
+ client.insert(ns, BSON("a" << BSONObj()));
+ }
+};
- test("{a:1, b: null}",
- "{a:1}");
+/** Unwind an array with one value. */
+class UnwindOneValue : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << BSON_ARRAY(1)));
+ }
+ string expectedResultSetString() const {
+ return "[{_id:0,a:1}]";
+ }
+};
- test("{a:null, b: null}",
- "{}");
+/** Unwind an array with two values. */
+class UnwindTwoValues : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, BSON("_id" << 0 << "a" << BSON_ARRAY(1 << 2)));
+ }
+ string expectedResultSetString() const {
+ return "[{_id:0,a:1},{_id:0,a:2}]";
+ }
+};
- // $elemMatch
+/** Unwind an array with two values, one of which is null. */
+class UnwindNull : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, fromjson("{_id:0,a:[1,null]}"));
+ }
+ string expectedResultSetString() const {
+ return "[{_id:0,a:1},{_id:0,a:null}]";
+ }
+};
- test("{a: {$elemMatch: {b: 1}}}",
- "{a: {$elemMatch: {b: 1}}}");
+/** Unwind two documents with arrays. */
+class TwoDocuments : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, fromjson("{_id:0,a:[1,2]}"));
+ client.insert(ns, fromjson("{_id:1,a:[3,4]}"));
+ }
+ string expectedResultSetString() const {
+ return "[{_id:0,a:1},{_id:0,a:2},{_id:1,a:3},{_id:1,a:4}]";
+ }
+};
- test("{a: {$elemMatch: {b:null}}}",
- "{}");
+/** Unwind an array in a nested document. */
+class NestedArray : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, fromjson("{_id:0,a:{b:[1,2],c:3}}"));
+ }
+ string expectedResultSetString() const {
+ return "[{_id:0,a:{b:1,c:3}},{_id:0,a:{b:2,c:3}}]";
+ }
+ string unwindFieldPath() const {
+ return "$a.b";
+ }
+};
- test("{a: {$elemMatch: {b:null, c:1}}}",
- "{a: {$elemMatch: {c: 1}}}");
+/** A missing array (nothing can be nested below a non-object field) produces no results. */
+class NonObjectParent : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, fromjson("{_id:0,a:4}"));
+ }
+ string unwindFieldPath() const {
+ return "$a.b";
+ }
+};
- // explicit $and
- test("{$and:[{a: 1}]}",
- "{$and:[{a: 1}]}");
+/** Unwind an array in a doubly nested document. */
+class DoubleNestedArray : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, fromjson("{_id:0,a:{b:{d:[1,2],e:4},c:3}}"));
+ }
+ string expectedResultSetString() const {
+ return "[{_id:0,a:{b:{d:1,e:4},c:3}},{_id:0,a:{b:{d:2,e:4},c:3}}]";
+ }
+ string unwindFieldPath() const {
+ return "$a.b.d";
+ }
+};
+
+/** Unwind several documents in a row. */
+class SeveralDocuments : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, fromjson("{_id:0,a:[1,2,3]}"));
+ client.insert(ns, fromjson("{_id:1}"));
+ client.insert(ns, fromjson("{_id:2}"));
+ client.insert(ns, fromjson("{_id:3,a:[10,20]}"));
+ client.insert(ns, fromjson("{_id:4,a:[30]}"));
+ }
+ string expectedResultSetString() const {
+ return "[{_id:0,a:1},{_id:0,a:2},{_id:0,a:3},{_id:3,a:10},"
+ "{_id:3,a:20},{_id:4,a:30}]";
+ }
+};
+
+/** Unwind several more documents in a row. */
+class SeveralMoreDocuments : public CheckResultsBase {
+ void populateData() {
+ client.insert(ns, fromjson("{_id:0,a:null}"));
+ client.insert(ns, fromjson("{_id:1}"));
+ client.insert(ns, fromjson("{_id:2,a:['a','b']}"));
+ client.insert(ns, fromjson("{_id:3}"));
+ client.insert(ns, fromjson("{_id:4,a:[1,2,3]}"));
+ client.insert(ns, fromjson("{_id:5,a:[4,5,6]}"));
+ client.insert(ns, fromjson("{_id:6,a:[7,8,9]}"));
+ client.insert(ns, fromjson("{_id:7,a:[]}"));
+ }
+ string expectedResultSetString() const {
+ return "[{_id:2,a:'a'},{_id:2,a:'b'},{_id:4,a:1},{_id:4,a:2},"
+ "{_id:4,a:3},{_id:5,a:4},{_id:5,a:5},{_id:5,a:6},"
+ "{_id:6,a:7},{_id:6,a:8},{_id:6,a:9}]";
+ }
+};
+
+/** Dependent field paths. */
+class Dependencies : public Base {
+public:
+ void run() {
+ createUnwind("$x.y.z");
+ DepsTracker dependencies;
+ ASSERT_EQUALS(DocumentSource::SEE_NEXT, unwind()->getDependencies(&dependencies));
+ ASSERT_EQUALS(1U, dependencies.fields.size());
+ ASSERT_EQUALS(1U, dependencies.fields.count("x.y.z"));
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(false, dependencies.needTextScore);
+ }
+};
- test("{$and:[{a: 1}, {b: null}]}",
- "{$and:[{a: 1}]}");
+} // namespace DocumentSourceUnwind
- test("{$and:[{a: 1}, {b: null, c:1}]}",
- "{$and:[{a: 1}, {c:1}]}");
+namespace DocumentSourceGeoNear {
+using mongo::DocumentSourceGeoNear;
+using mongo::DocumentSourceLimit;
- test("{$and:[{a: null}, {b: null}]}",
- "{}");
+class LimitCoalesce : public DocumentSourceCursor::Base {
+public:
+ void run() {
+ intrusive_ptr<DocumentSourceGeoNear> geoNear = DocumentSourceGeoNear::create(ctx());
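+
+        // $geoNear starts with a default limit of 100; coalescing a $limit keeps the
+        // smaller of the current and proposed values.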
- // explicit $or
- test("{$or:[{a: 1}]}",
- "{$or:[{a: 1}]}");
+ ASSERT_EQUALS(geoNear->getLimit(), 100);
- test("{$or:[{a: 1}, {b: null}]}",
- "{}");
+ ASSERT(geoNear->coalesce(DocumentSourceLimit::create(ctx(), 200)));
+ ASSERT_EQUALS(geoNear->getLimit(), 100);
- test("{$or:[{a: 1}, {b: null, c:1}]}",
- "{$or:[{a: 1}, {c:1}]}");
+ ASSERT(geoNear->coalesce(DocumentSourceLimit::create(ctx(), 50)));
+ ASSERT_EQUALS(geoNear->getLimit(), 50);
- test("{$or:[{a: null}, {b: null}]}",
- "{}");
+ ASSERT(geoNear->coalesce(DocumentSourceLimit::create(ctx(), 30)));
+ ASSERT_EQUALS(geoNear->getLimit(), 30);
+ }
+};
+} // namespace DocumentSourceGeoNear
+
+namespace DocumentSourceMatch {
+using mongo::DocumentSourceMatch;
+
+// Helpers to make a DocumentSourceMatch from a query object or json string
+intrusive_ptr<DocumentSourceMatch> makeMatch(const BSONObj& query) {
+ intrusive_ptr<DocumentSource> uncasted =
+ DocumentSourceMatch::createFromBson(BSON("$match" << query).firstElement(), NULL);
+ return dynamic_cast<DocumentSourceMatch*>(uncasted.get());
+}
+intrusive_ptr<DocumentSourceMatch> makeMatch(const string& queryJson) {
+ return makeMatch(fromjson(queryJson));
+}
+
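+// redactSafePortion() extracts the subset of a $match that is safe to apply ahead of
+// a $redact stage: predicates that only match concrete values are kept, while any
+// predicate that could also match a null or missing field (which redaction may
+// introduce) is stripped, as the cases below illustrate.
+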
+class RedactSafePortion {
+public:
+ void test(string input, string safePortion) {
+ try {
+ intrusive_ptr<DocumentSourceMatch> match = makeMatch(input);
+ ASSERT_EQUALS(match->redactSafePortion(), fromjson(safePortion));
+ } catch (...) {
+ unittest::log() << "Problem with redactSafePortion() of: " << input;
+ throw;
+ }
+ }
- test("{}",
- "{}");
+ void run() {
+ // Empty
+ test("{}", "{}");
- // $all and $in
- test("{a: {$all: [1, 0]}}",
- "{a: {$all: [1, 0]}}");
+ // Basic allowed things
+ test("{a:1}", "{a:1}");
- test("{a: {$all: [1, 0, null]}}",
- "{a: {$all: [1, 0]}}");
+ test("{a:'asdf'}", "{a:'asdf'}");
- test("{a: {$all: [{$elemMatch: {b:1}}]}}", // could be allowed but currently isn't
- "{}");
+ test("{a:/asdf/i}", "{a:/asdf/i}");
- test("{a: {$all: [1, 0, null]}}",
- "{a: {$all: [1, 0]}}");
+ test("{a: {$regex: 'adsf'}}", "{a: {$regex: 'adsf'}}");
- test("{a: {$in: [1, 0]}}",
- "{a: {$in: [1, 0]}}");
+ test("{a: {$regex: 'adsf', $options: 'i'}}", "{a: {$regex: 'adsf', $options: 'i'}}");
- test("{a: {$in: [1, 0, null]}}",
- "{}");
+ test("{a: {$mod: [1, 0]}}", "{a: {$mod: [1, 0]}}");
- {
- const char* comparisonOps[] = { "$gt", "$lt" , "$gte", "$lte", NULL};
- for (int i = 0; comparisonOps[i]; i++) {
- const char* op = comparisonOps[i];
- test(string("{a: {") + op + ": 1}}",
- string("{a: {") + op + ": 1}}");
+ test("{a: {$type: 1}}", "{a: {$type: 1}}");
- // $elemMatch takes direct expressions ...
- test(string("{a: {$elemMatch: {") + op + ": 1}}}",
- string("{a: {$elemMatch: {") + op + ": 1}}}");
+ // Basic disallowed things
+ test("{a: null}", "{}");
- // ... or top-level style full matches
- test(string("{a: {$elemMatch: {b: {") + op + ": 1}}}}",
- string("{a: {$elemMatch: {b: {") + op + ": 1}}}}");
+ test("{a: {}}", "{}");
- test(string("{a: {") + op + ": null}}",
- "{}");
-
- test(string("{a: {") + op + ": {}}}",
- "{}");
+ test("{a: []}", "{}");
- test(string("{a: {") + op + ": []}}",
- "{}");
+ test("{'a.0': 1}", "{}");
- test(string("{'a.0': {") + op + ": null}}",
- "{}");
+ test("{'a.0.b': 1}", "{}");
- test(string("{'a.0.b': {") + op + ": null}}",
- "{}");
- }
- }
- }
- };
-
- class Coalesce {
- public:
- void run() {
- intrusive_ptr<DocumentSourceMatch> match1 = makeMatch(BSON("a" << 1));
- intrusive_ptr<DocumentSourceMatch> match2 = makeMatch(BSON("b" << 1));
- intrusive_ptr<DocumentSourceMatch> match3 = makeMatch(BSON("c" << 1));
-
- // Check initial state
- ASSERT_EQUALS(match1->getQuery(), BSON("a" << 1));
- ASSERT_EQUALS(match2->getQuery(), BSON("b" << 1));
- ASSERT_EQUALS(match3->getQuery(), BSON("c" << 1));
-
- ASSERT(match1->coalesce(match2));
- ASSERT_EQUALS(match1->getQuery(), fromjson("{'$and': [{a:1}, {b:1}]}"));
-
- ASSERT(match1->coalesce(match3));
- ASSERT_EQUALS(match1->getQuery(), fromjson("{'$and': [{'$and': [{a:1}, {b:1}]},"
- "{c:1}]}"));
- }
- };
- } // namespace DocumentSourceMatch
+ test("{a: {$ne: 1}}", "{}");
- class All : public Suite {
- public:
- All() : Suite( "documentsource" ) {
- }
- void setupTests() {
- add<DocumentSourceClass::Deps>();
-
- add<DocumentSourceCursor::Empty>();
- add<DocumentSourceCursor::Iterate>();
- add<DocumentSourceCursor::Dispose>();
- add<DocumentSourceCursor::IterateDispose>();
- add<DocumentSourceCursor::LimitCoalesce>();
-
- add<DocumentSourceLimit::DisposeSource>();
- add<DocumentSourceLimit::DisposeSourceCascade>();
- add<DocumentSourceLimit::Dependencies>();
-
- add<DocumentSourceGroup::NonObject>();
- add<DocumentSourceGroup::EmptySpec>();
- add<DocumentSourceGroup::IdEmptyObject>();
- add<DocumentSourceGroup::IdObjectExpression>();
- add<DocumentSourceGroup::IdInvalidObjectExpression>();
- add<DocumentSourceGroup::TwoIdSpecs>();
- add<DocumentSourceGroup::IdEmptyString>();
- add<DocumentSourceGroup::IdStringConstant>();
- add<DocumentSourceGroup::IdFieldPath>();
- add<DocumentSourceGroup::IdInvalidFieldPath>();
- add<DocumentSourceGroup::IdNumericConstant>();
- add<DocumentSourceGroup::IdArrayConstant>();
- add<DocumentSourceGroup::IdRegularExpression>();
- add<DocumentSourceGroup::DollarAggregateFieldName>();
- add<DocumentSourceGroup::NonObjectAggregateSpec>();
- add<DocumentSourceGroup::EmptyObjectAggregateSpec>();
- add<DocumentSourceGroup::BadAccumulator>();
- add<DocumentSourceGroup::SumArray>();
- add<DocumentSourceGroup::MultipleAccumulatorsForAField>();
- add<DocumentSourceGroup::DuplicateAggregateFieldNames>();
- add<DocumentSourceGroup::AggregateObjectExpression>();
- add<DocumentSourceGroup::AggregateOperatorExpression>();
- add<DocumentSourceGroup::EmptyCollection>();
- add<DocumentSourceGroup::SingleDocument>();
- add<DocumentSourceGroup::TwoValuesSingleKey>();
- add<DocumentSourceGroup::TwoValuesTwoKeys>();
- add<DocumentSourceGroup::FourValuesTwoKeys>();
- add<DocumentSourceGroup::FourValuesTwoKeysTwoAccumulators>();
- add<DocumentSourceGroup::GroupNullUndefinedIds>();
- add<DocumentSourceGroup::ComplexId>();
- add<DocumentSourceGroup::UndefinedAccumulatorValue>();
- add<DocumentSourceGroup::RouterMerger>();
- add<DocumentSourceGroup::Dependencies>();
- add<DocumentSourceGroup::StringConstantIdAndAccumulatorExpressions>();
- add<DocumentSourceGroup::ArrayConstantAccumulatorExpression>();
-
- add<DocumentSourceProject::Inclusion>();
- add<DocumentSourceProject::Optimize>();
- add<DocumentSourceProject::NonObjectSpec>();
- add<DocumentSourceProject::EmptyObjectSpec>();
- add<DocumentSourceProject::TopLevelDollar>();
- add<DocumentSourceProject::InvalidSpec>();
- add<DocumentSourceProject::TwoDocuments>();
- add<DocumentSourceProject::Dependencies>();
-
- add<DocumentSourceSort::Empty>();
- add<DocumentSourceSort::SingleValue>();
- add<DocumentSourceSort::TwoValues>();
- add<DocumentSourceSort::NonObjectSpec>();
- add<DocumentSourceSort::EmptyObjectSpec>();
- add<DocumentSourceSort::NonNumberDirectionSpec>();
- add<DocumentSourceSort::InvalidNumberDirectionSpec>();
- add<DocumentSourceSort::DescendingOrder>();
- add<DocumentSourceSort::DottedSortField>();
- add<DocumentSourceSort::CompoundSortSpec>();
- add<DocumentSourceSort::CompoundSortSpecAlternateOrder>();
- add<DocumentSourceSort::CompoundSortSpecAlternateOrderSecondField>();
- add<DocumentSourceSort::InconsistentTypeSort>();
- add<DocumentSourceSort::MixedNumericSort>();
- add<DocumentSourceSort::MissingValue>();
- add<DocumentSourceSort::NullValue>();
- add<DocumentSourceSort::MissingObjectWithinArray>();
- add<DocumentSourceSort::ExtractArrayValues>();
- add<DocumentSourceSort::Dependencies>();
-
- add<DocumentSourceUnwind::Empty>();
- add<DocumentSourceUnwind::MissingField>();
- add<DocumentSourceUnwind::NullField>();
- add<DocumentSourceUnwind::EmptyArray>();
- add<DocumentSourceUnwind::UnexpectedNumber>();
- add<DocumentSourceUnwind::LaterUnexpectedNumber>();
- add<DocumentSourceUnwind::UnexpectedString>();
- add<DocumentSourceUnwind::UnexpectedObject>();
- add<DocumentSourceUnwind::UnwindOneValue>();
- add<DocumentSourceUnwind::UnwindTwoValues>();
- add<DocumentSourceUnwind::UnwindNull>();
- add<DocumentSourceUnwind::TwoDocuments>();
- add<DocumentSourceUnwind::NestedArray>();
- add<DocumentSourceUnwind::NonObjectParent>();
- add<DocumentSourceUnwind::DoubleNestedArray>();
- add<DocumentSourceUnwind::SeveralDocuments>();
- add<DocumentSourceUnwind::SeveralMoreDocuments>();
- add<DocumentSourceUnwind::Dependencies>();
-
- add<DocumentSourceGeoNear::LimitCoalesce>();
-
- add<DocumentSourceMatch::RedactSafePortion>();
- add<DocumentSourceMatch::Coalesce>();
+ test("{a: {$nin: [1, 2, 3]}}", "{}");
+
+ test("{a: {$exists: true}}", // could be allowed but currently isn't
+ "{}");
+
+ test("{a: {$exists: false}}", // can never be allowed
+ "{}");
+
+ test("{a: {$size: 1}}", "{}");
+
+ test("{$nor: [{a:1}]}", "{}");
+
+ // Combinations
+ test("{a:1, b: 'asdf'}", "{a:1, b: 'asdf'}");
+
+ test("{a:1, b: null}", "{a:1}");
+
+ test("{a:null, b: null}", "{}");
+
+ // $elemMatch
+
+ test("{a: {$elemMatch: {b: 1}}}", "{a: {$elemMatch: {b: 1}}}");
+
+ test("{a: {$elemMatch: {b:null}}}", "{}");
+
+ test("{a: {$elemMatch: {b:null, c:1}}}", "{a: {$elemMatch: {c: 1}}}");
+
+ // explicit $and
+ test("{$and:[{a: 1}]}", "{$and:[{a: 1}]}");
+
+ test("{$and:[{a: 1}, {b: null}]}", "{$and:[{a: 1}]}");
+
+ test("{$and:[{a: 1}, {b: null, c:1}]}", "{$and:[{a: 1}, {c:1}]}");
+
+ test("{$and:[{a: null}, {b: null}]}", "{}");
+
+ // explicit $or
+ test("{$or:[{a: 1}]}", "{$or:[{a: 1}]}");
+
+ test("{$or:[{a: 1}, {b: null}]}", "{}");
+
+ test("{$or:[{a: 1}, {b: null, c:1}]}", "{$or:[{a: 1}, {c:1}]}");
+
+ test("{$or:[{a: null}, {b: null}]}", "{}");
+
+ test("{}", "{}");
+
+ // $all and $in
+ test("{a: {$all: [1, 0]}}", "{a: {$all: [1, 0]}}");
+
+ test("{a: {$all: [1, 0, null]}}", "{a: {$all: [1, 0]}}");
+
+ test("{a: {$all: [{$elemMatch: {b:1}}]}}", // could be allowed but currently isn't
+ "{}");
+
+ test("{a: {$in: [1, 0]}}", "{a: {$in: [1, 0]}}");
+
+ test("{a: {$in: [1, 0, null]}}", "{}");
+
+ {
+ const char* comparisonOps[] = {"$gt", "$lt", "$gte", "$lte", NULL};
+ for (int i = 0; comparisonOps[i]; i++) {
+ const char* op = comparisonOps[i];
+ test(string("{a: {") + op + ": 1}}", string("{a: {") + op + ": 1}}");
+
+ // $elemMatch takes direct expressions ...
+ test(string("{a: {$elemMatch: {") + op + ": 1}}}",
+ string("{a: {$elemMatch: {") + op + ": 1}}}");
+
+ // ... or top-level style full matches
+ test(string("{a: {$elemMatch: {b: {") + op + ": 1}}}}",
+ string("{a: {$elemMatch: {b: {") + op + ": 1}}}}");
+
+ test(string("{a: {") + op + ": null}}", "{}");
+
+ test(string("{a: {") + op + ": {}}}", "{}");
+
+ test(string("{a: {") + op + ": []}}", "{}");
+
+ test(string("{'a.0': {") + op + ": null}}", "{}");
+
+ test(string("{'a.0.b': {") + op + ": null}}", "{}");
+ }
}
- };
+ }
+};
+
+class Coalesce {
+public:
+ void run() {
+ intrusive_ptr<DocumentSourceMatch> match1 = makeMatch(BSON("a" << 1));
+ intrusive_ptr<DocumentSourceMatch> match2 = makeMatch(BSON("b" << 1));
+ intrusive_ptr<DocumentSourceMatch> match3 = makeMatch(BSON("c" << 1));
+
+ // Check initial state
+ ASSERT_EQUALS(match1->getQuery(), BSON("a" << 1));
+ ASSERT_EQUALS(match2->getQuery(), BSON("b" << 1));
+ ASSERT_EQUALS(match3->getQuery(), BSON("c" << 1));
+
+ ASSERT(match1->coalesce(match2));
+ ASSERT_EQUALS(match1->getQuery(), fromjson("{'$and': [{a:1}, {b:1}]}"));
+
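+        // Coalescing a third $match nests the previous $and rather than flattening it.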
+ ASSERT(match1->coalesce(match3));
+ ASSERT_EQUALS(match1->getQuery(),
+ fromjson(
+ "{'$and': [{'$and': [{a:1}, {b:1}]},"
+ "{c:1}]}"));
+ }
+};
+} // namespace DocumentSourceMatch
+
+class All : public Suite {
+public:
+ All() : Suite("documentsource") {}
+ void setupTests() {
+ add<DocumentSourceClass::Deps>();
+
+ add<DocumentSourceCursor::Empty>();
+ add<DocumentSourceCursor::Iterate>();
+ add<DocumentSourceCursor::Dispose>();
+ add<DocumentSourceCursor::IterateDispose>();
+ add<DocumentSourceCursor::LimitCoalesce>();
+
+ add<DocumentSourceLimit::DisposeSource>();
+ add<DocumentSourceLimit::DisposeSourceCascade>();
+ add<DocumentSourceLimit::Dependencies>();
+
+ add<DocumentSourceGroup::NonObject>();
+ add<DocumentSourceGroup::EmptySpec>();
+ add<DocumentSourceGroup::IdEmptyObject>();
+ add<DocumentSourceGroup::IdObjectExpression>();
+ add<DocumentSourceGroup::IdInvalidObjectExpression>();
+ add<DocumentSourceGroup::TwoIdSpecs>();
+ add<DocumentSourceGroup::IdEmptyString>();
+ add<DocumentSourceGroup::IdStringConstant>();
+ add<DocumentSourceGroup::IdFieldPath>();
+ add<DocumentSourceGroup::IdInvalidFieldPath>();
+ add<DocumentSourceGroup::IdNumericConstant>();
+ add<DocumentSourceGroup::IdArrayConstant>();
+ add<DocumentSourceGroup::IdRegularExpression>();
+ add<DocumentSourceGroup::DollarAggregateFieldName>();
+ add<DocumentSourceGroup::NonObjectAggregateSpec>();
+ add<DocumentSourceGroup::EmptyObjectAggregateSpec>();
+ add<DocumentSourceGroup::BadAccumulator>();
+ add<DocumentSourceGroup::SumArray>();
+ add<DocumentSourceGroup::MultipleAccumulatorsForAField>();
+ add<DocumentSourceGroup::DuplicateAggregateFieldNames>();
+ add<DocumentSourceGroup::AggregateObjectExpression>();
+ add<DocumentSourceGroup::AggregateOperatorExpression>();
+ add<DocumentSourceGroup::EmptyCollection>();
+ add<DocumentSourceGroup::SingleDocument>();
+ add<DocumentSourceGroup::TwoValuesSingleKey>();
+ add<DocumentSourceGroup::TwoValuesTwoKeys>();
+ add<DocumentSourceGroup::FourValuesTwoKeys>();
+ add<DocumentSourceGroup::FourValuesTwoKeysTwoAccumulators>();
+ add<DocumentSourceGroup::GroupNullUndefinedIds>();
+ add<DocumentSourceGroup::ComplexId>();
+ add<DocumentSourceGroup::UndefinedAccumulatorValue>();
+ add<DocumentSourceGroup::RouterMerger>();
+ add<DocumentSourceGroup::Dependencies>();
+ add<DocumentSourceGroup::StringConstantIdAndAccumulatorExpressions>();
+ add<DocumentSourceGroup::ArrayConstantAccumulatorExpression>();
+
+ add<DocumentSourceProject::Inclusion>();
+ add<DocumentSourceProject::Optimize>();
+ add<DocumentSourceProject::NonObjectSpec>();
+ add<DocumentSourceProject::EmptyObjectSpec>();
+ add<DocumentSourceProject::TopLevelDollar>();
+ add<DocumentSourceProject::InvalidSpec>();
+ add<DocumentSourceProject::TwoDocuments>();
+ add<DocumentSourceProject::Dependencies>();
+
+ add<DocumentSourceSort::Empty>();
+ add<DocumentSourceSort::SingleValue>();
+ add<DocumentSourceSort::TwoValues>();
+ add<DocumentSourceSort::NonObjectSpec>();
+ add<DocumentSourceSort::EmptyObjectSpec>();
+ add<DocumentSourceSort::NonNumberDirectionSpec>();
+ add<DocumentSourceSort::InvalidNumberDirectionSpec>();
+ add<DocumentSourceSort::DescendingOrder>();
+ add<DocumentSourceSort::DottedSortField>();
+ add<DocumentSourceSort::CompoundSortSpec>();
+ add<DocumentSourceSort::CompoundSortSpecAlternateOrder>();
+ add<DocumentSourceSort::CompoundSortSpecAlternateOrderSecondField>();
+ add<DocumentSourceSort::InconsistentTypeSort>();
+ add<DocumentSourceSort::MixedNumericSort>();
+ add<DocumentSourceSort::MissingValue>();
+ add<DocumentSourceSort::NullValue>();
+ add<DocumentSourceSort::MissingObjectWithinArray>();
+ add<DocumentSourceSort::ExtractArrayValues>();
+ add<DocumentSourceSort::Dependencies>();
+
+ add<DocumentSourceUnwind::Empty>();
+ add<DocumentSourceUnwind::MissingField>();
+ add<DocumentSourceUnwind::NullField>();
+ add<DocumentSourceUnwind::EmptyArray>();
+ add<DocumentSourceUnwind::UnexpectedNumber>();
+ add<DocumentSourceUnwind::LaterUnexpectedNumber>();
+ add<DocumentSourceUnwind::UnexpectedString>();
+ add<DocumentSourceUnwind::UnexpectedObject>();
+ add<DocumentSourceUnwind::UnwindOneValue>();
+ add<DocumentSourceUnwind::UnwindTwoValues>();
+ add<DocumentSourceUnwind::UnwindNull>();
+ add<DocumentSourceUnwind::TwoDocuments>();
+ add<DocumentSourceUnwind::NestedArray>();
+ add<DocumentSourceUnwind::NonObjectParent>();
+ add<DocumentSourceUnwind::DoubleNestedArray>();
+ add<DocumentSourceUnwind::SeveralDocuments>();
+ add<DocumentSourceUnwind::SeveralMoreDocuments>();
+ add<DocumentSourceUnwind::Dependencies>();
+
+ add<DocumentSourceGeoNear::LimitCoalesce>();
+
+ add<DocumentSourceMatch::RedactSafePortion>();
+ add<DocumentSourceMatch::Coalesce>();
+ }
+};
- SuiteInstance<All> myall;
+SuiteInstance<All> myall;
-} // namespace DocumentSourceTests
+} // namespace DocumentSourceTests
diff --git a/src/mongo/dbtests/documenttests.cpp b/src/mongo/dbtests/documenttests.cpp
index 28e63419242..e53313b042d 100644
--- a/src/mongo/dbtests/documenttests.cpp
+++ b/src/mongo/dbtests/documenttests.cpp
@@ -38,1474 +38,1618 @@
namespace DocumentTests {
- using std::endl;
- using std::numeric_limits;
- using std::string;
- using std::vector;
+using std::endl;
+using std::numeric_limits;
+using std::string;
+using std::vector;
+
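+/** Return the 'index'-th (field name, Value) pair of 'doc' by advancing a FieldIterator. */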
+mongo::Document::FieldPair getNthField(mongo::Document doc, size_t index) {
+ mongo::FieldIterator it(doc);
+ while (index--) // advance index times
+ it.next();
+ return it.next();
+}
+
+namespace Document {
+
+using mongo::Document;
+
+BSONObj toBson(const Document& document) {
+ return document.toBson();
+}
+
+Document fromBson(BSONObj obj) {
+ return Document(obj);
+}
+
+void assertRoundTrips(const Document& document1) {
+ BSONObj obj1 = toBson(document1);
+ Document document2 = fromBson(obj1);
+ BSONObj obj2 = toBson(document2);
+ ASSERT_EQUALS(obj1, obj2);
+ ASSERT_EQUALS(document1, document2);
+}
+
+/** Create a Document. */
+class Create {
+public:
+ void run() {
+ Document document;
+ ASSERT_EQUALS(0U, document.size());
+ assertRoundTrips(document);
+ }
+};
+
+/** Create a Document from a BSONObj. */
+class CreateFromBsonObj {
+public:
+ void run() {
+ Document document = fromBson(BSONObj());
+ ASSERT_EQUALS(0U, document.size());
+ document = fromBson(BSON("a" << 1 << "b"
+ << "q"));
+ ASSERT_EQUALS(2U, document.size());
+ ASSERT_EQUALS("a", getNthField(document, 0).first.toString());
+ ASSERT_EQUALS(1, getNthField(document, 0).second.getInt());
+ ASSERT_EQUALS("b", getNthField(document, 1).first.toString());
+ ASSERT_EQUALS("q", getNthField(document, 1).second.getString());
+ assertRoundTrips(document);
+ }
+};
+
+/** Add Document fields. */
+class AddField {
+public:
+ void run() {
+ MutableDocument md;
+ md.addField("foo", Value(1));
+ ASSERT_EQUALS(1U, md.peek().size());
+ ASSERT_EQUALS(1, md.peek()["foo"].getInt());
+ md.addField("bar", Value(99));
+ ASSERT_EQUALS(2U, md.peek().size());
+ ASSERT_EQUALS(99, md.peek()["bar"].getInt());
+ // No assertion is triggered by a duplicate field name.
+ md.addField("a", Value(5));
+
+ Document final = md.freeze();
+ ASSERT_EQUALS(3U, final.size());
+ assertRoundTrips(final);
+ }
+};
+
+/** Get Document values. */
+class GetValue {
+public:
+ void run() {
+ Document document = fromBson(BSON("a" << 1 << "b" << 2.2));
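+        // Each field is read twice: repeated lookups must return the same value.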
+ ASSERT_EQUALS(1, document["a"].getInt());
+ ASSERT_EQUALS(1, document["a"].getInt());
+ ASSERT_EQUALS(2.2, document["b"].getDouble());
+ ASSERT_EQUALS(2.2, document["b"].getDouble());
+ // Missing field.
+ ASSERT(document["c"].missing());
+ ASSERT(document["c"].missing());
+ assertRoundTrips(document);
+ }
+};
+
+/** Set and remove Document fields. */
+class SetField {
+public:
+ void run() {
+ Document original = fromBson(BSON("a" << 1 << "b" << 2.2 << "c" << 99));
+
+        // Initial positions, checked at the end of the test to make sure nothing moved.
+        const Position apos = original.positionOf("a");
+        const Position bpos = original.positionOf("b");
+        const Position cpos = original.positionOf("c");
+
+ MutableDocument md(original);
+
+ // Set the first field.
+ md.setField("a", Value("foo"));
+ ASSERT_EQUALS(3U, md.peek().size());
+ ASSERT_EQUALS("foo", md.peek()["a"].getString());
+ ASSERT_EQUALS("foo", getNthField(md.peek(), 0).second.getString());
+ assertRoundTrips(md.peek());
+ // Set the second field.
+ md["b"] = Value("bar");
+ ASSERT_EQUALS(3U, md.peek().size());
+ ASSERT_EQUALS("bar", md.peek()["b"].getString());
+ ASSERT_EQUALS("bar", getNthField(md.peek(), 1).second.getString());
+ assertRoundTrips(md.peek());
+
+ // Remove the second field.
+ md.setField("b", Value());
+ PRINT(md.peek().toString());
+ ASSERT_EQUALS(2U, md.peek().size());
+ ASSERT(md.peek()["b"].missing());
+ ASSERT_EQUALS("a", getNthField(md.peek(), 0).first.toString());
+ ASSERT_EQUALS("c", getNthField(md.peek(), 1).first.toString());
+ ASSERT_EQUALS(99, md.peek()["c"].getInt());
+ assertRoundTrips(md.peek());
+
+ // Remove the first field.
+ md["a"] = Value();
+ ASSERT_EQUALS(1U, md.peek().size());
+ ASSERT(md.peek()["a"].missing());
+ ASSERT_EQUALS("c", getNthField(md.peek(), 0).first.toString());
+ ASSERT_EQUALS(99, md.peek()["c"].getInt());
+ assertRoundTrips(md.peek());
+
+ // Remove the final field. Verify document is empty.
+ md.remove("c");
+ ASSERT(md.peek().empty());
+ ASSERT_EQUALS(0U, md.peek().size());
+ ASSERT_EQUALS(md.peek(), Document());
+ ASSERT(!FieldIterator(md.peek()).more());
+ ASSERT(md.peek()["c"].missing());
+ assertRoundTrips(md.peek());
+
+ // Set a nested field using []
+ md["x"]["y"]["z"] = Value("nested");
+ ASSERT_EQUALS(md.peek()["x"]["y"]["z"], Value("nested"));
+
+ // Set a nested field using setNestedField
+ FieldPath xxyyzz = string("xx.yy.zz");
+ md.setNestedField(xxyyzz, Value("nested"));
+ ASSERT_EQUALS(md.peek().getNestedField(xxyyzz), Value("nested"));
+
+        // Set a nested field through existing empty documents.
+ md["xxx"] = Value(Document());
+ md["xxx"]["yyy"] = Value(Document());
+ FieldPath xxxyyyzzz = string("xxx.yyy.zzz");
+ md.setNestedField(xxxyyyzzz, Value("nested"));
+ ASSERT_EQUALS(md.peek().getNestedField(xxxyyyzzz), Value("nested"));
+
+ // Make sure nothing moved
+ ASSERT_EQUALS(apos, md.peek().positionOf("a"));
+        ASSERT_EQUALS(bpos, md.peek().positionOf("b"));
+ ASSERT_EQUALS(cpos, md.peek().positionOf("c"));
+ ASSERT_EQUALS(Position(), md.peek().positionOf("d"));
+ }
+};
+
+/** Document comparator. */
+class Compare {
+public:
+ void run() {
+ assertComparison(0, BSONObj(), BSONObj());
+ assertComparison(0, BSON("a" << 1), BSON("a" << 1));
+ assertComparison(-1, BSONObj(), BSON("a" << 1));
+ assertComparison(-1, BSON("a" << 1), BSON("c" << 1));
+ assertComparison(0, BSON("a" << 1 << "r" << 2), BSON("a" << 1 << "r" << 2));
+ assertComparison(-1, BSON("a" << 1), BSON("a" << 1 << "r" << 2));
+ assertComparison(0, BSON("a" << 2), BSON("a" << 2));
+ assertComparison(-1, BSON("a" << 1), BSON("a" << 2));
+ assertComparison(-1, BSON("a" << 1 << "b" << 1), BSON("a" << 1 << "b" << 2));
+ // numbers sort before strings
+ assertComparison(-1,
+ BSON("a" << 1),
+ BSON("a"
+ << "foo"));
+ // numbers sort before strings, even if keys compare otherwise
+ assertComparison(-1,
+ BSON("b" << 1),
+ BSON("a"
+ << "foo"));
+ // null before number, even if keys compare otherwise
+ assertComparison(-1, BSON("z" << BSONNULL), BSON("a" << 1));
+ }
- mongo::Document::FieldPair getNthField(mongo::Document doc, size_t index) {
- mongo::FieldIterator it (doc);
- while (index--) // advance index times
- it.next();
- return it.next();
+public:
+ int cmp(const BSONObj& a, const BSONObj& b) {
+ int result = Document::compare(fromBson(a), fromBson(b));
+        // Reduce the comparison result to its sign.
+        return result < 0 ? -1 : result > 0 ? 1 : 0;
+ }
+ void assertComparison(int expectedResult, const BSONObj& a, const BSONObj& b) {
+ ASSERT_EQUALS(expectedResult, cmp(a, b));
+ ASSERT_EQUALS(-expectedResult, cmp(b, a));
+ if (expectedResult == 0) {
+ ASSERT_EQUALS(hash(a), hash(b));
+ }
+ }
+ size_t hash(const BSONObj& obj) {
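+        // Mix the document into an arbitrary fixed seed; equal documents must hash equally.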
+ size_t seed = 0x106e1e1;
+ Document(obj).hash_combine(seed);
+ return seed;
}
+};
- namespace Document {
+/** Shallow copy clone of a single-field Document. */
+class Clone {
+public:
+ void run() {
+ const Document document = fromBson(BSON("a" << BSON("b" << 1)));
+ MutableDocument cloneOnDemand(document);
- using mongo::Document;
+ // Check equality.
+ ASSERT_EQUALS(document, cloneOnDemand.peek());
+ // Check pointer equality of sub document.
+ ASSERT_EQUALS(document["a"].getDocument().getPtr(),
+ cloneOnDemand.peek()["a"].getDocument().getPtr());
- BSONObj toBson( const Document& document ) {
- return document.toBson();
- }
- Document fromBson( BSONObj obj ) {
- return Document(obj);
- }
+        // Change a field in the clone and ensure the original document's field is unchanged.
+ cloneOnDemand.setField(StringData("a"), Value(2));
+ ASSERT_EQUALS(Value(1), document.getNestedField(FieldPath("a.b")));
- void assertRoundTrips( const Document& document1 ) {
- BSONObj obj1 = toBson( document1 );
- Document document2 = fromBson( obj1 );
- BSONObj obj2 = toBson( document2 );
- ASSERT_EQUALS( obj1, obj2 );
- ASSERT_EQUALS( document1, document2 );
- }
- /** Create a Document. */
- class Create {
- public:
- void run() {
- Document document;
- ASSERT_EQUALS( 0U, document.size() );
- assertRoundTrips( document );
- }
- };
-
- /** Create a Document from a BSONObj. */
- class CreateFromBsonObj {
- public:
- void run() {
- Document document = fromBson( BSONObj() );
- ASSERT_EQUALS( 0U, document.size() );
- document = fromBson( BSON( "a" << 1 << "b" << "q" ) );
- ASSERT_EQUALS( 2U, document.size() );
- ASSERT_EQUALS( "a", getNthField(document, 0).first.toString() );
- ASSERT_EQUALS( 1, getNthField(document, 0).second.getInt() );
- ASSERT_EQUALS( "b", getNthField(document, 1).first.toString() );
- ASSERT_EQUALS( "q", getNthField(document, 1).second.getString() );
- assertRoundTrips( document );
- }
- };
-
- /** Add Document fields. */
- class AddField {
- public:
- void run() {
- MutableDocument md;
- md.addField( "foo", Value( 1 ) );
- ASSERT_EQUALS( 1U, md.peek().size() );
- ASSERT_EQUALS( 1, md.peek()["foo"].getInt() );
- md.addField( "bar", Value( 99 ) );
- ASSERT_EQUALS( 2U, md.peek().size() );
- ASSERT_EQUALS( 99, md.peek()["bar"].getInt() );
- // No assertion is triggered by a duplicate field name.
- md.addField( "a", Value( 5 ) );
-
- Document final = md.freeze();
- ASSERT_EQUALS( 3U, final.size() );
- assertRoundTrips( final );
- }
- };
-
- /** Get Document values. */
- class GetValue {
- public:
- void run() {
- Document document = fromBson( BSON( "a" << 1 << "b" << 2.2 ) );
- ASSERT_EQUALS( 1, document["a"].getInt() );
- ASSERT_EQUALS( 1, document["a"].getInt() );
- ASSERT_EQUALS( 2.2, document["b"].getDouble() );
- ASSERT_EQUALS( 2.2, document["b"].getDouble() );
- // Missing field.
- ASSERT( document["c"].missing() );
- ASSERT( document["c"].missing() );
- assertRoundTrips( document );
- }
- };
-
- /** Get Document fields. */
- class SetField {
- public:
- void run() {
- Document original = fromBson(BSON("a" << 1 << "b" << 2.2 << "c" << 99));
-
- // Initial positions. Used at end of function to make sure nothing moved
- const Position apos = original.positionOf("a");
- const Position bpos = original.positionOf("c");
- const Position cpos = original.positionOf("c");
-
- MutableDocument md (original);
-
- // Set the first field.
- md.setField( "a" , Value( "foo" ) );
- ASSERT_EQUALS( 3U, md.peek().size() );
- ASSERT_EQUALS( "foo", md.peek()["a"].getString() );
- ASSERT_EQUALS( "foo", getNthField(md.peek(), 0).second.getString() );
- assertRoundTrips( md.peek() );
- // Set the second field.
- md["b"] = Value("bar");
- ASSERT_EQUALS( 3U, md.peek().size() );
- ASSERT_EQUALS( "bar", md.peek()["b"].getString() );
- ASSERT_EQUALS( "bar", getNthField(md.peek(), 1).second.getString() );
- assertRoundTrips( md.peek() );
-
- // Remove the second field.
- md.setField("b", Value());
- PRINT(md.peek().toString());
- ASSERT_EQUALS( 2U, md.peek().size() );
- ASSERT( md.peek()["b"].missing() );
- ASSERT_EQUALS( "a", getNthField(md.peek(), 0 ).first.toString() );
- ASSERT_EQUALS( "c", getNthField(md.peek(), 1 ).first.toString() );
- ASSERT_EQUALS( 99, md.peek()["c"].getInt() );
- assertRoundTrips( md.peek() );
-
- // Remove the first field.
- md["a"] = Value();
- ASSERT_EQUALS( 1U, md.peek().size() );
- ASSERT( md.peek()["a"].missing() );
- ASSERT_EQUALS( "c", getNthField(md.peek(), 0 ).first.toString() );
- ASSERT_EQUALS( 99, md.peek()["c"].getInt() );
- assertRoundTrips( md.peek() );
-
- // Remove the final field. Verify document is empty.
- md.remove("c");
- ASSERT( md.peek().empty() );
- ASSERT_EQUALS( 0U, md.peek().size() );
- ASSERT_EQUALS( md.peek(), Document() );
- ASSERT( !FieldIterator(md.peek()).more() );
- ASSERT( md.peek()["c"].missing() );
- assertRoundTrips( md.peek() );
-
- // Set a nested field using []
- md["x"]["y"]["z"] = Value("nested");
- ASSERT_EQUALS(md.peek()["x"]["y"]["z"], Value("nested"));
-
- // Set a nested field using setNestedField
- FieldPath xxyyzz = string("xx.yy.zz");
- md.setNestedField(xxyyzz, Value("nested"));
- ASSERT_EQUALS(md.peek().getNestedField(xxyyzz), Value("nested") );
-
- // Set a nested fields through an existing empty document
- md["xxx"] = Value(Document());
- md["xxx"]["yyy"] = Value(Document());
- FieldPath xxxyyyzzz = string("xxx.yyy.zzz");
- md.setNestedField(xxxyyyzzz, Value("nested"));
- ASSERT_EQUALS(md.peek().getNestedField(xxxyyyzzz), Value("nested") );
-
- // Make sure nothing moved
- ASSERT_EQUALS(apos, md.peek().positionOf("a"));
- ASSERT_EQUALS(bpos, md.peek().positionOf("c"));
- ASSERT_EQUALS(cpos, md.peek().positionOf("c"));
- ASSERT_EQUALS(Position(), md.peek().positionOf("d"));
- }
- };
-
- /** Document comparator. */
- class Compare {
- public:
- void run() {
- assertComparison( 0, BSONObj(), BSONObj() );
- assertComparison( 0, BSON( "a" << 1 ), BSON( "a" << 1 ) );
- assertComparison( -1, BSONObj(), BSON( "a" << 1 ) );
- assertComparison( -1, BSON( "a" << 1 ), BSON( "c" << 1 ) );
- assertComparison( 0, BSON( "a" << 1 << "r" << 2 ), BSON( "a" << 1 << "r" << 2 ) );
- assertComparison( -1, BSON( "a" << 1 ), BSON( "a" << 1 << "r" << 2 ) );
- assertComparison( 0, BSON( "a" << 2 ), BSON( "a" << 2 ) );
- assertComparison( -1, BSON( "a" << 1 ), BSON( "a" << 2 ) );
- assertComparison( -1, BSON( "a" << 1 << "b" << 1 ), BSON( "a" << 1 << "b" << 2 ) );
- // numbers sort before strings
- assertComparison( -1, BSON( "a" << 1 ), BSON( "a" << "foo" ) );
- // numbers sort before strings, even if keys compare otherwise
- assertComparison( -1, BSON( "b" << 1 ), BSON( "a" << "foo" ) );
- // null before number, even if keys compare otherwise
- assertComparison( -1, BSON( "z" << BSONNULL ), BSON( "a" << 1 ) );
- }
- public:
- int cmp( const BSONObj& a, const BSONObj& b ) {
- int result = Document::compare( fromBson( a ), fromBson( b ) );
- return // sign
- result < 0 ? -1 :
- result > 0 ? 1 :
- 0;
- }
- void assertComparison( int expectedResult, const BSONObj& a, const BSONObj& b ) {
- ASSERT_EQUALS( expectedResult, cmp( a, b ) );
- ASSERT_EQUALS( -expectedResult, cmp( b, a ) );
- if ( expectedResult == 0 ) {
- ASSERT_EQUALS( hash( a ), hash( b ) );
- }
- }
- size_t hash( const BSONObj& obj ) {
- size_t seed = 0x106e1e1;
- Document(obj).hash_combine(seed);
- return seed;
- }
- };
-
- /** Shallow copy clone of a single field Document. */
- class Clone {
- public:
- void run() {
- const Document document = fromBson( BSON( "a" << BSON( "b" << 1 ) ) );
- MutableDocument cloneOnDemand (document);
-
- // Check equality.
- ASSERT_EQUALS(document, cloneOnDemand.peek());
- // Check pointer equality of sub document.
- ASSERT_EQUALS( document["a"].getDocument().getPtr(),
- cloneOnDemand.peek()["a"].getDocument().getPtr() );
-
-
- // Change field in clone and ensure the original document's field is unchanged.
- cloneOnDemand.setField( StringData("a"), Value(2) );
- ASSERT_EQUALS( Value(1), document.getNestedField(FieldPath("a.b")) );
-
-
- // setNestedField and ensure the original document is unchanged.
-
- cloneOnDemand.reset(document);
- vector<Position> path;
- ASSERT_EQUALS( Value(1), document.getNestedField(FieldPath("a.b"), &path) );
-
- cloneOnDemand.setNestedField(path, Value(2));
-
- ASSERT_EQUALS( Value(1), document.getNestedField(FieldPath("a.b")) );
- ASSERT_EQUALS( Value(2), cloneOnDemand.peek().getNestedField(FieldPath("a.b")) );
- ASSERT_EQUALS( DOC( "a" << DOC( "b" << 1 ) ), document );
- ASSERT_EQUALS( DOC( "a" << DOC( "b" << 2 ) ), cloneOnDemand.freeze() );
- }
- };
-
- /** Shallow copy clone of a multi field Document. */
- class CloneMultipleFields {
- public:
- void run() {
- Document document =
- fromBson( fromjson( "{a:1,b:['ra',4],c:{z:1},d:'lal'}" ) );
- Document clonedDocument = document.clone();
- ASSERT_EQUALS(document, clonedDocument);
- }
- };
-
- /** FieldIterator for an empty Document. */
- class FieldIteratorEmpty {
- public:
- void run() {
- FieldIterator iterator ( (Document()) );
- ASSERT( !iterator.more() );
- }
- };
-
- /** FieldIterator for a single field Document. */
- class FieldIteratorSingle {
- public:
- void run() {
- FieldIterator iterator (fromBson( BSON( "a" << 1 ) ));
- ASSERT( iterator.more() );
- Document::FieldPair field = iterator.next();
- ASSERT_EQUALS( "a", field.first.toString() );
- ASSERT_EQUALS( 1, field.second.getInt() );
- ASSERT( !iterator.more() );
- }
- };
-
- /** FieldIterator for a multiple field Document. */
- class FieldIteratorMultiple {
- public:
- void run() {
- FieldIterator iterator (fromBson( BSON( "a" << 1 << "b" << 5.6 << "c" << "z" )));
- ASSERT( iterator.more() );
- Document::FieldPair field = iterator.next();
- ASSERT_EQUALS( "a", field.first.toString() );
- ASSERT_EQUALS( 1, field.second.getInt() );
- ASSERT( iterator.more() );
-
- Document::FieldPair field2 = iterator.next();
- ASSERT_EQUALS( "b", field2.first.toString() );
- ASSERT_EQUALS( 5.6, field2.second.getDouble() );
- ASSERT( iterator.more() );
-
- Document::FieldPair field3 = iterator.next();
- ASSERT_EQUALS( "c", field3.first.toString() );
- ASSERT_EQUALS( "z", field3.second.getString() );
- ASSERT( !iterator.more() );
- }
- };
-
- class AllTypesDoc {
- public:
- void run() {
- // These are listed in order of BSONType with some duplicates
- append("minkey", MINKEY);
- // EOO not valid in middle of BSONObj
- append("double", 1.0);
- append("c-string", "string\0after NUL"); // after NULL is ignored
- append("c++", StringData("string\0after NUL", StringData::LiteralTag()).toString());
- append("StringData", StringData("string\0after NUL", StringData::LiteralTag()));
- append("emptyObj", BSONObj());
- append("filledObj", BSON("a" << 1));
- append("emptyArray", BSON("" << BSONArray()).firstElement());
- append("filledArray", BSON("" << BSON_ARRAY(1 << "a")).firstElement());
- append("binData", BSONBinData("a\0b", 3, BinDataGeneral));
- append("binDataCustom", BSONBinData("a\0b", 3, bdtCustom));
- append("binDataUUID", BSONBinData("123456789\0abcdef", 16, bdtUUID));
- append("undefined", BSONUndefined);
- append("oid", OID());
- append("true", true);
- append("false", false);
- append("date", jsTime());
- append("null", BSONNULL);
- append("regex", BSONRegEx(".*"));
- append("regexFlags", BSONRegEx(".*", "i"));
- append("regexEmpty", BSONRegEx("", ""));
- append("dbref", BSONDBRef("foo", OID()));
- append("code", BSONCode("function() {}"));
- append("codeNul", BSONCode(StringData("var nul = '\0'", StringData::LiteralTag())));
- append("symbol", BSONSymbol("foo"));
- append("symbolNul", BSONSymbol(StringData("f\0o", StringData::LiteralTag())));
- append("codeWScope", BSONCodeWScope("asdf", BSONObj()));
- append("codeWScopeWScope", BSONCodeWScope("asdf", BSON("one" << 1)));
- append("int", 1);
- append("timestamp", OpTime());
- append("long", 1LL);
- append("very long", 1LL << 40);
- append("maxkey", MAXKEY);
-
- const BSONArray arr = arrBuilder.arr();
-
- // can't use append any more since arrBuilder is done
- objBuilder << "mega array" << arr;
- docBuilder["mega array"] = mongo::Value(values);
-
- const BSONObj obj = objBuilder.obj();
- const Document doc = docBuilder.freeze();
-
- const BSONObj obj2 = toBson(doc);
- const Document doc2 = fromBson(obj);
-
- // logical equality
- ASSERT_EQUALS(obj, obj2);
- ASSERT_EQUALS(doc, doc2);
-
- // binary equality
- ASSERT_EQUALS(obj.objsize(), obj2.objsize());
- ASSERT_EQUALS(memcmp(obj.objdata(), obj2.objdata(), obj.objsize()), 0);
-
- // ensure sorter serialization round-trips correctly
- BufBuilder bb;
- doc.serializeForSorter(bb);
- BufReader reader(bb.buf(), bb.len());
- const Document doc3 = Document::deserializeForSorter(
- reader, Document::SorterDeserializeSettings());
- BSONObj obj3 = toBson(doc3);
- ASSERT_EQUALS(obj.objsize(), obj3.objsize());
- ASSERT_EQUALS(memcmp(obj.objdata(), obj3.objdata(), obj.objsize()), 0);
- }
-
- template <typename T>
- void append(const char* name, const T& thing) {
- objBuilder << name << thing;
- arrBuilder << thing;
- docBuilder[name] = mongo::Value(thing);
- values.push_back(mongo::Value(thing));
- }
-
- vector<mongo::Value> values;
- MutableDocument docBuilder;
- BSONObjBuilder objBuilder;
- BSONArrayBuilder arrBuilder;
- };
- } // namespace Document
-
- namespace Value {
-
- using mongo::Value;
-
- BSONObj toBson( const Value& value ) {
- if (value.missing())
- return BSONObj(); // EOO
-
- BSONObjBuilder bob;
- value.addToBsonObj( &bob, "" );
- return bob.obj();
- }
+        // Call setNestedField() and ensure the original document is unchanged.
- Value fromBson( const BSONObj& obj ) {
- BSONElement element = obj.firstElement();
- return Value( element );
- }
+ cloneOnDemand.reset(document);
+ vector<Position> path;
+ ASSERT_EQUALS(Value(1), document.getNestedField(FieldPath("a.b"), &path));
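+        // getNestedField() recorded the positions along "a.b" in 'path'; setNestedField() reuses them below.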
- void assertRoundTrips( const Value& value1 ) {
- BSONObj obj1 = toBson( value1 );
- Value value2 = fromBson( obj1 );
- BSONObj obj2 = toBson( value2 );
- ASSERT_EQUALS( obj1, obj2 );
- ASSERT_EQUALS(value1, value2);
- ASSERT_EQUALS(value1.getType(), value2.getType());
- }
+ cloneOnDemand.setNestedField(path, Value(2));
- class BSONArrayTest {
- public:
- void run() {
- ASSERT_EQUALS(Value(BSON_ARRAY(1 << 2 << 3)), DOC_ARRAY(1 << 2 << 3));
- ASSERT_EQUALS(Value(BSONArray()), Value(vector<Value>()));
- }
- };
-
- /** Int type. */
- class Int {
- public:
- void run() {
- Value value = Value( 5 );
- ASSERT_EQUALS( 5, value.getInt() );
- ASSERT_EQUALS( 5, value.getLong() );
- ASSERT_EQUALS( 5, value.getDouble() );
- ASSERT_EQUALS( NumberInt, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Long type. */
- class Long {
- public:
- void run() {
- Value value = Value( 99LL );
- ASSERT_EQUALS( 99, value.getLong() );
- ASSERT_EQUALS( 99, value.getDouble() );
- ASSERT_EQUALS( NumberLong, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Double type. */
- class Double {
- public:
- void run() {
- Value value = Value( 5.5 );
- ASSERT_EQUALS( 5.5, value.getDouble() );
- ASSERT_EQUALS( NumberDouble, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** String type. */
- class String {
- public:
- void run() {
- Value value = Value( "foo" );
- ASSERT_EQUALS( "foo", value.getString() );
- ASSERT_EQUALS( mongo::String, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** String with a null character. */
- class StringWithNull {
- public:
- void run() {
- string withNull( "a\0b", 3 );
- BSONObj objWithNull = BSON( "" << withNull );
- ASSERT_EQUALS( withNull, objWithNull[ "" ].str() );
- Value value = fromBson( objWithNull );
- ASSERT_EQUALS( withNull, value.getString() );
- assertRoundTrips( value );
- }
- };
-
- /** Date type. */
- class Date {
- public:
- void run() {
- Value value = Value(Date_t(999));
- ASSERT_EQUALS( 999, value.getDate() );
- ASSERT_EQUALS( mongo::Date, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Timestamp type. */
- class Timestamp {
- public:
- void run() {
- Value value = Value( OpTime( 777 ) );
- ASSERT( OpTime( 777 ) == value.getTimestamp() );
- ASSERT_EQUALS( mongo::Timestamp, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Document with no fields. */
- class EmptyDocument {
- public:
- void run() {
- mongo::Document document = mongo::Document();
- Value value = Value( document );
- ASSERT_EQUALS( document.getPtr(), value.getDocument().getPtr() );
- ASSERT_EQUALS( Object, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Document type. */
- class Document {
- public:
- void run() {
- mongo::MutableDocument md;
- md.addField( "a", Value( 5 ) );
- md.addField( "apple", Value( "rrr" ) );
- md.addField( "banana", Value( -.3 ) );
- mongo::Document document = md.freeze();
-
- Value value = Value( document );
- // Check document pointers are equal.
- ASSERT_EQUALS( document.getPtr(), value.getDocument().getPtr() );
- // Check document contents.
- ASSERT_EQUALS( 5, document["a"].getInt() );
- ASSERT_EQUALS( "rrr", document["apple"].getString() );
- ASSERT_EQUALS( -.3, document["banana"].getDouble() );
- ASSERT_EQUALS( Object, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Array with no elements. */
- class EmptyArray {
- public:
- void run() {
- vector<Value> array;
- Value value (array);
- const vector<Value>& array2 = value.getArray();
-
- ASSERT( array2.empty() );
- ASSERT_EQUALS( Array, value.getType() );
- ASSERT_EQUALS( 0U, value.getArrayLength() );
- assertRoundTrips( value );
- }
- };
-
- /** Array type. */
- class Array {
- public:
- void run() {
- vector<Value> array;
- array.push_back( Value( 5 ) );
- array.push_back( Value( "lala" ) );
- array.push_back( Value( 3.14 ) );
- Value value = Value( array );
- const vector<Value>& array2 = value.getArray();
-
- ASSERT( !array2.empty() );
- ASSERT_EQUALS( array2.size(), 3U);
- ASSERT_EQUALS( 5, array2[0].getInt() );
- ASSERT_EQUALS( "lala", array2[1].getString() );
- ASSERT_EQUALS( 3.14, array2[2].getDouble() );
- ASSERT_EQUALS( mongo::Array, value.getType() );
- ASSERT_EQUALS( 3U, value.getArrayLength() );
- assertRoundTrips( value );
- }
- };
-
- /** Oid type. */
- class Oid {
- public:
- void run() {
- Value value =
- fromBson( BSON( "" << OID( "abcdefabcdefabcdefabcdef" ) ) );
- ASSERT_EQUALS( OID( "abcdefabcdefabcdefabcdef" ), value.getOid() );
- ASSERT_EQUALS( jstOID, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Bool type. */
- class Bool {
- public:
- void run() {
- Value value = fromBson( BSON( "" << true ) );
- ASSERT_EQUALS( true, value.getBool() );
- ASSERT_EQUALS( mongo::Bool, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Regex type. */
- class Regex {
- public:
- void run() {
- Value value = fromBson( fromjson( "{'':/abc/}" ) );
- ASSERT_EQUALS( string("abc"), value.getRegex() );
- ASSERT_EQUALS( RegEx, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Symbol type (currently unsupported). */
- class Symbol {
- public:
- void run() {
- Value value (BSONSymbol("FOOBAR"));
- ASSERT_EQUALS( "FOOBAR", value.getSymbol() );
- ASSERT_EQUALS( mongo::Symbol, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Undefined type. */
- class Undefined {
- public:
- void run() {
- Value value = Value(BSONUndefined);
- ASSERT_EQUALS( mongo::Undefined, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Null type. */
- class Null {
- public:
- void run() {
- Value value = Value(BSONNULL);
- ASSERT_EQUALS( jstNULL, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** True value. */
- class True {
- public:
- void run() {
- Value value = Value(true);
- ASSERT_EQUALS( true, value.getBool() );
- ASSERT_EQUALS( mongo::Bool, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** False value. */
- class False {
- public:
- void run() {
- Value value = Value(false);
- ASSERT_EQUALS( false, value.getBool() );
- ASSERT_EQUALS( mongo::Bool, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** -1 value. */
- class MinusOne {
- public:
- void run() {
- Value value = Value(-1);
- ASSERT_EQUALS( -1, value.getInt() );
- ASSERT_EQUALS( NumberInt, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** 0 value. */
- class Zero {
- public:
- void run() {
- Value value = Value(0);
- ASSERT_EQUALS( 0, value.getInt() );
- ASSERT_EQUALS( NumberInt, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** 1 value. */
- class One {
- public:
- void run() {
- Value value = Value(1);
- ASSERT_EQUALS( 1, value.getInt() );
- ASSERT_EQUALS( NumberInt, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- namespace Coerce {
-
- class ToBoolBase {
- public:
- virtual ~ToBoolBase() {
- }
- void run() {
- ASSERT_EQUALS( expected(), value().coerceToBool() );
- }
- protected:
- virtual Value value() = 0;
- virtual bool expected() = 0;
- };
-
- class ToBoolTrue : public ToBoolBase {
- bool expected() { return true; }
- };
-
- class ToBoolFalse : public ToBoolBase {
- bool expected() { return false; }
- };
-
- /** Coerce 0 to bool. */
- class ZeroIntToBool : public ToBoolFalse {
- Value value() { return Value( 0 ); }
- };
-
- /** Coerce -1 to bool. */
- class NonZeroIntToBool : public ToBoolTrue {
- Value value() { return Value( -1 ); }
- };
-
- /** Coerce 0LL to bool. */
- class ZeroLongToBool : public ToBoolFalse {
- Value value() { return Value( 0LL ); }
- };
-
- /** Coerce 5LL to bool. */
- class NonZeroLongToBool : public ToBoolTrue {
- Value value() { return Value( 5LL ); }
- };
-
- /** Coerce 0.0 to bool. */
- class ZeroDoubleToBool : public ToBoolFalse {
- Value value() { return Value( 0 ); }
- };
-
- /** Coerce -1.3 to bool. */
- class NonZeroDoubleToBool : public ToBoolTrue {
- Value value() { return Value( -1.3 ); }
- };
-
- /** Coerce "" to bool. */
- class StringToBool : public ToBoolTrue {
- Value value() { return Value( "" ); }
- };
-
- /** Coerce {} to bool. */
- class ObjectToBool : public ToBoolTrue {
- Value value() {
- return Value( mongo::Document() );
- }
- };
-
- /** Coerce [] to bool. */
- class ArrayToBool : public ToBoolTrue {
- Value value() {
- return Value( vector<Value>() );
- }
- };
-
- /** Coerce Date(0) to bool. */
- class DateToBool : public ToBoolTrue {
- Value value() { return Value(Date_t(0)); }
- };
-
- /** Coerce js literal regex to bool. */
- class RegexToBool : public ToBoolTrue {
- Value value() { return fromBson( fromjson( "{''://}" ) ); }
- };
-
- /** Coerce true to bool. */
- class TrueToBool : public ToBoolTrue {
- Value value() { return fromBson( BSON( "" << true ) ); }
- };
-
- /** Coerce false to bool. */
- class FalseToBool : public ToBoolFalse {
- Value value() { return fromBson( BSON( "" << false ) ); }
- };
-
- /** Coerce null to bool. */
- class NullToBool : public ToBoolFalse {
- Value value() { return Value(BSONNULL); }
- };
-
- /** Coerce undefined to bool. */
- class UndefinedToBool : public ToBoolFalse {
- Value value() { return Value(BSONUndefined); }
- };
-
- class ToIntBase {
- public:
- virtual ~ToIntBase() {
- }
- void run() {
- if (asserts())
- ASSERT_THROWS( value().coerceToInt(), UserException );
- else
- ASSERT_EQUALS( expected(), value().coerceToInt() );
- }
- protected:
- virtual Value value() = 0;
- virtual int expected() { return 0; }
- virtual bool asserts() { return false; }
- };
-
- /** Coerce -5 to int. */
- class IntToInt : public ToIntBase {
- Value value() { return Value( -5 ); }
- int expected() { return -5; }
- };
-
- /** Coerce long to int. */
- class LongToInt : public ToIntBase {
- Value value() { return Value( 0xff00000007LL ); }
- int expected() { return 7; }
- };
-
- /** Coerce 9.8 to int. */
- class DoubleToInt : public ToIntBase {
- Value value() { return Value( 9.8 ); }
- int expected() { return 9; }
- };
-
- /** Coerce null to int. */
- class NullToInt : public ToIntBase {
- Value value() { return Value(BSONNULL); }
- bool asserts() { return true; }
- };
-
- /** Coerce undefined to int. */
- class UndefinedToInt : public ToIntBase {
- Value value() { return Value(BSONUndefined); }
- bool asserts() { return true; }
- };
-
- /** Coerce "" to int unsupported. */
- class StringToInt {
- public:
- void run() {
- ASSERT_THROWS( Value( "" ).coerceToInt(), UserException );
- }
- };
-
- class ToLongBase {
- public:
- virtual ~ToLongBase() {
- }
- void run() {
- if (asserts())
- ASSERT_THROWS( value().coerceToLong(), UserException );
- else
- ASSERT_EQUALS( expected(), value().coerceToLong() );
- }
- protected:
- virtual Value value() = 0;
- virtual long long expected() { return 0; }
- virtual bool asserts() { return false; }
- };
-
- /** Coerce -5 to long. */
- class IntToLong : public ToLongBase {
- Value value() { return Value( -5 ); }
- long long expected() { return -5; }
- };
-
- /** Coerce long to long. */
- class LongToLong : public ToLongBase {
- Value value() { return Value( 0xff00000007LL ); }
- long long expected() { return 0xff00000007LL; }
- };
-
- /** Coerce 9.8 to long. */
- class DoubleToLong : public ToLongBase {
- Value value() { return Value( 9.8 ); }
- long long expected() { return 9; }
- };
-
- /** Coerce null to long. */
- class NullToLong : public ToLongBase {
- Value value() { return Value(BSONNULL); }
- bool asserts() { return true; }
- };
-
- /** Coerce undefined to long. */
- class UndefinedToLong : public ToLongBase {
- Value value() { return Value(BSONUndefined); }
- bool asserts() { return true; }
- };
-
- /** Coerce string to long unsupported. */
- class StringToLong {
- public:
- void run() {
- ASSERT_THROWS( Value( "" ).coerceToLong(), UserException );
- }
- };
-
- class ToDoubleBase {
- public:
- virtual ~ToDoubleBase() {
- }
- void run() {
- if (asserts())
- ASSERT_THROWS( value().coerceToDouble(), UserException );
- else
- ASSERT_EQUALS( expected(), value().coerceToDouble() );
- }
- protected:
- virtual Value value() = 0;
- virtual double expected() { return 0; }
- virtual bool asserts() { return false; }
- };
-
- /** Coerce -5 to double. */
- class IntToDouble : public ToDoubleBase {
- Value value() { return Value( -5 ); }
- double expected() { return -5; }
- };
-
- /** Coerce long to double. */
- class LongToDouble : public ToDoubleBase {
- Value value() {
- // A long that cannot be exactly represented as a double.
- return Value( static_cast<double>( 0x8fffffffffffffffLL ) );
- }
- double expected() { return static_cast<double>( 0x8fffffffffffffffLL ); }
- };
-
- /** Coerce double to double. */
- class DoubleToDouble : public ToDoubleBase {
- Value value() { return Value( 9.8 ); }
- double expected() { return 9.8; }
- };
-
- /** Coerce null to double. */
- class NullToDouble : public ToDoubleBase {
- Value value() { return Value(BSONNULL); }
- bool asserts() { return true; }
- };
-
- /** Coerce undefined to double. */
- class UndefinedToDouble : public ToDoubleBase {
- Value value() { return Value(BSONUndefined); }
- bool asserts() { return true; }
- };
-
- /** Coerce string to double unsupported. */
- class StringToDouble {
- public:
- void run() {
- ASSERT_THROWS( Value( "" ).coerceToDouble(), UserException );
- }
- };
-
- class ToDateBase {
- public:
- virtual ~ToDateBase() {
- }
- void run() {
- ASSERT_EQUALS( expected(), value().coerceToDate() );
- }
- protected:
- virtual Value value() = 0;
- virtual long long expected() = 0;
- };
-
- /** Coerce date to date. */
- class DateToDate : public ToDateBase {
- Value value() { return Value(Date_t(888)); }
- long long expected() { return 888; }
- };
-
- /**
- * Convert timestamp to date. This extracts the time portion of the timestamp, which
- * is different from BSON behavior of interpreting all bytes as a date.
- */
- class TimestampToDate : public ToDateBase {
- Value value() {
- return Value( OpTime( 777, 666 ) );
- }
- long long expected() { return 777 * 1000; }
- };
-
- /** Coerce string to date unsupported. */
- class StringToDate {
- public:
- void run() {
- ASSERT_THROWS( Value( "" ).coerceToDate(), UserException );
- }
- };
-
- class ToStringBase {
- public:
- virtual ~ToStringBase() {
- }
- void run() {
- ASSERT_EQUALS( expected(), value().coerceToString() );
- }
- protected:
- virtual Value value() = 0;
- virtual string expected() { return ""; }
- };
-
- /** Coerce -0.2 to string. */
- class DoubleToString : public ToStringBase {
- Value value() { return Value( -0.2 ); }
- string expected() { return "-0.2"; }
- };
-
- /** Coerce -4 to string. */
- class IntToString : public ToStringBase {
- Value value() { return Value( -4 ); }
- string expected() { return "-4"; }
- };
-
- /** Coerce 10000LL to string. */
- class LongToString : public ToStringBase {
- Value value() { return Value( 10000LL ); }
- string expected() { return "10000"; }
- };
-
- /** Coerce string to string. */
- class StringToString : public ToStringBase {
- Value value() { return Value( "fO_o" ); }
- string expected() { return "fO_o"; }
- };
-
- /** Coerce timestamp to string. */
- class TimestampToString : public ToStringBase {
- Value value() {
- return Value( OpTime( 1, 2 ) );
- }
- string expected() { return OpTime( 1, 2 ).toStringPretty(); }
- };
-
- /** Coerce date to string. */
- class DateToString : public ToStringBase {
- Value value() { return Value(Date_t(1234567890LL*1000)); }
- string expected() { return "2009-02-13T23:31:30"; } // from js
- };
-
- /** Coerce null to string. */
- class NullToString : public ToStringBase {
- Value value() { return Value(BSONNULL); }
- };
-
- /** Coerce undefined to string. */
- class UndefinedToString : public ToStringBase {
- Value value() { return Value(BSONUndefined); }
- };
-
- /** Coerce document to string unsupported. */
- class DocumentToString {
- public:
- void run() {
- ASSERT_THROWS( Value
- ( mongo::Document() ).coerceToString(),
- UserException );
- }
- };
-
- /** Coerce timestamp to timestamp. */
- class TimestampToTimestamp {
- public:
- void run() {
- Value value = Value( OpTime( 1010 ) );
- ASSERT( OpTime( 1010 ) == value.coerceToTimestamp() );
- }
- };
-
- /** Coerce date to timestamp unsupported. */
- class DateToTimestamp {
- public:
- void run() {
- ASSERT_THROWS( Value(Date_t(1010)).coerceToTimestamp(),
- UserException );
- }
- };
-
- } // namespace Coerce
-
- /** Get the "widest" of two numeric types. */
- class GetWidestNumeric {
- public:
- void run() {
- using mongo::Undefined;
-
- // Numeric types.
- assertWidest( NumberInt, NumberInt, NumberInt );
- assertWidest( NumberLong, NumberInt, NumberLong );
- assertWidest( NumberDouble, NumberInt, NumberDouble );
- assertWidest( NumberLong, NumberLong, NumberLong );
- assertWidest( NumberDouble, NumberLong, NumberDouble );
- assertWidest( NumberDouble, NumberDouble, NumberDouble );
-
- // Missing value and numeric types (result Undefined).
- assertWidest( Undefined, NumberInt, Undefined );
- assertWidest( Undefined, NumberInt, Undefined );
- assertWidest( Undefined, NumberLong, jstNULL );
- assertWidest( Undefined, NumberLong, Undefined );
- assertWidest( Undefined, NumberDouble, jstNULL );
- assertWidest( Undefined, NumberDouble, Undefined );
-
- // Missing value types (result Undefined).
- assertWidest( Undefined, jstNULL, jstNULL );
- assertWidest( Undefined, jstNULL, Undefined );
- assertWidest( Undefined, Undefined, Undefined );
-
- // Other types (result Undefined).
- assertWidest( Undefined, NumberInt, mongo::Bool );
- assertWidest( Undefined, mongo::String, NumberDouble );
- }
- private:
- void assertWidest( BSONType expectedWidest, BSONType a, BSONType b ) {
- ASSERT_EQUALS( expectedWidest, Value::getWidestNumeric( a, b ) );
- ASSERT_EQUALS( expectedWidest, Value::getWidestNumeric( b, a ) );
- }
- };
-
- /** Add a Value to a BSONObj. */
- class AddToBsonObj {
- public:
- void run() {
- BSONObjBuilder bob;
- Value( 4.4 ).addToBsonObj( &bob, "a" );
- Value( 22 ).addToBsonObj( &bob, "b" );
- Value( "astring" ).addToBsonObj( &bob, "c" );
- ASSERT_EQUALS( BSON( "a" << 4.4 << "b" << 22 << "c" << "astring" ), bob.obj() );
- }
- };
-
- /** Add a Value to a BSONArray. */
- class AddToBsonArray {
- public:
- void run() {
- BSONArrayBuilder bab;
- Value( 4.4 ).addToBsonArray( &bab );
- Value( 22 ).addToBsonArray( &bab );
- Value( "astring" ).addToBsonArray( &bab );
- ASSERT_EQUALS( BSON_ARRAY( 4.4 << 22 << "astring" ), bab.arr() );
- }
- };
-
- /** Value comparator. */
- class Compare {
- public:
- void run() {
- BSONObjBuilder undefinedBuilder;
- undefinedBuilder.appendUndefined( "" );
- BSONObj undefined = undefinedBuilder.obj();
-
- // Undefined / null.
- assertComparison( 0, undefined, undefined );
- assertComparison( -1, undefined, BSON( "" << BSONNULL ) );
- assertComparison( 0, BSON( "" << BSONNULL ), BSON( "" << BSONNULL ) );
-
- // Undefined / null with other types.
- assertComparison( -1, undefined, BSON( "" << 1 ) );
- assertComparison( -1, undefined, BSON( "" << "bar" ) );
- assertComparison( -1, BSON( "" << BSONNULL ), BSON( "" << -1 ) );
- assertComparison( -1, BSON( "" << BSONNULL ), BSON( "" << "bar" ) );
-
- // Numeric types.
- assertComparison( 0, 5, 5LL );
- assertComparison( 0, -2, -2.0 );
- assertComparison( 0, 90LL, 90.0 );
- assertComparison( -1, 5, 6LL );
- assertComparison( -1, -2, 2.1 );
- assertComparison( 1, 90LL, 89.999 );
- assertComparison( -1, 90, 90.1 );
- assertComparison( 0, numeric_limits<double>::quiet_NaN(),
- numeric_limits<double>::signaling_NaN() );
- assertComparison( -1, numeric_limits<double>::quiet_NaN(), 5 );
-
- // strings compare between numbers and objects
- assertComparison( 1, "abc", 90 );
- assertComparison( -1, "abc", BSON( "a" << "b" ) );
-
- // String comparison.
- assertComparison( -1, "", "a" );
- assertComparison( 0, "a", "a" );
- assertComparison( -1, "a", "b" );
- assertComparison( -1, "aa", "b" );
- assertComparison( 1, "bb", "b" );
- assertComparison( 1, "bb", "b" );
- assertComparison( 1, "b-", "b" );
- assertComparison( -1, "b-", "ba" );
- // With a null character.
- assertComparison( 1, string( "a\0", 2 ), "a" );
-
- // Object.
- assertComparison( 0, fromjson( "{'':{}}" ), fromjson( "{'':{}}" ) );
- assertComparison( 0, fromjson( "{'':{x:1}}" ), fromjson( "{'':{x:1}}" ) );
- assertComparison( -1, fromjson( "{'':{}}" ), fromjson( "{'':{x:1}}" ) );
- assertComparison( -1, fromjson( "{'':{'z': 1}}" ), fromjson( "{'':{'a': 'a'}}") );
-
- // Array.
- assertComparison( 0, fromjson( "{'':[]}" ), fromjson( "{'':[]}" ) );
- assertComparison( -1, fromjson( "{'':[0]}" ), fromjson( "{'':[1]}" ) );
- assertComparison( -1, fromjson( "{'':[0,0]}" ), fromjson( "{'':[1]}" ) );
- assertComparison( -1, fromjson( "{'':[0]}" ), fromjson( "{'':[0,0]}" ) );
- assertComparison( -1, fromjson( "{'':[0]}" ), fromjson( "{'':['']}" ) );
-
- // OID.
- assertComparison( 0, OID( "abcdefabcdefabcdefabcdef" ),
- OID( "abcdefabcdefabcdefabcdef" ) );
- assertComparison( 1, OID( "abcdefabcdefabcdefabcdef" ),
- OID( "010101010101010101010101" ) );
-
- // Bool.
- assertComparison( 0, true, true );
- assertComparison( 0, false, false );
- assertComparison( 1, true, false );
-
- // Date.
- assertComparison( 0, Date_t( 555 ), Date_t( 555 ) );
- assertComparison( 1, Date_t( 555 ), Date_t( 554 ) );
- // Negative date.
- assertComparison( 1, Date_t( 0 ), Date_t( -1 ) );
-
- // Regex.
- assertComparison( 0, fromjson( "{'':/a/}" ), fromjson( "{'':/a/}" ) );
- assertComparison( -1, fromjson( "{'':/a/}" ), fromjson( "{'':/a/i}" ) );
- assertComparison( -1, fromjson( "{'':/a/}" ), fromjson( "{'':/aa/}" ) );
-
- // Timestamp.
- assertComparison( 0, OpTime( 1234 ), OpTime( 1234 ) );
- assertComparison( -1, OpTime( 4 ), OpTime( 1234 ) );
-
- // Cross-type comparisons. Listed in order of canonical types.
- assertComparison(-1, Value(mongo::MINKEY), Value());
- assertComparison(0, Value(), Value());
- assertComparison(0, Value(), Value(BSONUndefined));
- assertComparison(-1, Value(BSONUndefined), Value(BSONNULL));
- assertComparison(-1, Value(BSONNULL), Value(1));
- assertComparison(0, Value(1), Value(1LL));
- assertComparison(0, Value(1), Value(1.0));
- assertComparison(-1, Value(1), Value("string"));
- assertComparison(0, Value("string"), Value(BSONSymbol("string")));
- assertComparison(-1, Value("string"), Value(mongo::Document()));
- assertComparison(-1, Value(mongo::Document()), Value(vector<Value>()));
- assertComparison(-1, Value(vector<Value>()), Value(BSONBinData("", 0, MD5Type)));
- assertComparison(-1, Value(BSONBinData("", 0, MD5Type)), Value(mongo::OID()));
- assertComparison(-1, Value(mongo::OID()), Value(false));
- assertComparison(-1, Value(false), Value(Date_t(0)));
- assertComparison(-1, Value(Date_t(0)), Value(OpTime()));
- assertComparison(-1, Value(OpTime()), Value(BSONRegEx("")));
- assertComparison(-1, Value(BSONRegEx("")), Value(BSONDBRef("", mongo::OID())));
- assertComparison(-1, Value(BSONDBRef("", mongo::OID())), Value(BSONCode("")));
- assertComparison(-1, Value(BSONCode("")), Value(BSONCodeWScope("", BSONObj())));
- assertComparison(-1, Value(BSONCodeWScope("", BSONObj())), Value(mongo::MAXKEY));
- }
- private:
- template<class T,class U>
- void assertComparison( int expectedResult, const T& a, const U& b ) {
- assertComparison( expectedResult, BSON( "" << a ), BSON( "" << b ) );
- }
- void assertComparison( int expectedResult, const OpTime& a, const OpTime& b ) {
- BSONObjBuilder first;
- first.appendTimestamp( "", a.asDate() );
- BSONObjBuilder second;
- second.appendTimestamp( "", b.asDate() );
- assertComparison( expectedResult, first.obj(), second.obj() );
- }
- int sign(int cmp) {
- if (cmp == 0) return 0;
- else if (cmp < 0) return -1;
- else return 1;
- }
- int cmp( const Value& a, const Value& b ) {
- return sign(Value::compare(a, b));
- }
- void assertComparison( int expectedResult, const BSONObj& a, const BSONObj& b ) {
- assertComparison(expectedResult, fromBson(a), fromBson(b));
- }
- void assertComparison(int expectedResult, const Value& a, const Value& b) {
- mongo::unittest::log() <<
- "testing " << a.toString() << " and " << b.toString() << endl;
- // reflexivity
- ASSERT_EQUALS(0, cmp(a, a));
- ASSERT_EQUALS(0, cmp(b, b));
-
- // symmetry
- ASSERT_EQUALS( expectedResult, cmp( a, b ) );
- ASSERT_EQUALS( -expectedResult, cmp( b, a ) );
-
- if ( expectedResult == 0 ) {
- // equal values must hash equally.
- ASSERT_EQUALS( hash( a ), hash( b ) );
- }
- else {
- // unequal values must hash unequally.
- // (not true in general but we should error if it fails in any of these cases)
- ASSERT_NOT_EQUALS( hash( a ), hash( b ) );
- }
-
- // same as BSON
- ASSERT_EQUALS(expectedResult, sign(toBson(a).firstElement().woCompare(
- toBson(b).firstElement())));
- }
- size_t hash(const Value& v) {
- size_t seed = 0xf00ba6;
- v.hash_combine( seed );
- return seed;
- }
- };
-
- class SubFields {
- public:
- void run() {
- const Value val = fromBson(fromjson(
- "{'': {a: [{x:1, b:[1, {y:1, c:1234, z:1}, 1]}]}}"));
- // ^ this outer object is removed by fromBson
-
- ASSERT(val.getType() == mongo::Object);
-
- ASSERT(val[999].missing());
- ASSERT(val["missing"].missing());
- ASSERT(val["a"].getType() == mongo::Array);
-
- ASSERT(val["a"][999].missing());
- ASSERT(val["a"]["missing"].missing());
- ASSERT(val["a"][0].getType() == mongo::Object);
-
- ASSERT(val["a"][0][999].missing());
- ASSERT(val["a"][0]["missing"].missing());
- ASSERT(val["a"][0]["b"].getType() == mongo::Array);
-
- ASSERT(val["a"][0]["b"][999].missing());
- ASSERT(val["a"][0]["b"]["missing"].missing());
- ASSERT(val["a"][0]["b"][1].getType() == mongo::Object);
-
- ASSERT(val["a"][0]["b"][1][999].missing());
- ASSERT(val["a"][0]["b"][1]["missing"].missing());
- ASSERT(val["a"][0]["b"][1]["c"].getType() == mongo::NumberInt);
- ASSERT_EQUALS(val["a"][0]["b"][1]["c"].getInt(), 1234);
- }
- };
-
-
- class SerializationOfMissingForSorter {
- // Can't be tested in AllTypesDoc since missing values are omitted when adding to BSON.
- public:
- void run() {
- const Value missing;
- const Value arrayOfMissing = Value(vector<Value>(10));
-
- BufBuilder bb;
- missing.serializeForSorter(bb);
- arrayOfMissing.serializeForSorter(bb);
-
- BufReader reader(bb.buf(), bb.len());
- ASSERT_EQUALS(
- missing,
- Value::deserializeForSorter(reader, Value::SorterDeserializeSettings()));
- ASSERT_EQUALS(
- arrayOfMissing,
- Value::deserializeForSorter(reader, Value::SorterDeserializeSettings()));
- }
- };
- } // namespace Value
-
- class All : public Suite {
- public:
- All() : Suite( "document" ) {
- }
- void setupTests() {
- add<Document::Create>();
- add<Document::CreateFromBsonObj>();
- add<Document::AddField>();
- add<Document::GetValue>();
- add<Document::SetField>();
- add<Document::Compare>();
- add<Document::Clone>();
- add<Document::CloneMultipleFields>();
- add<Document::FieldIteratorEmpty>();
- add<Document::FieldIteratorSingle>();
- add<Document::FieldIteratorMultiple>();
- add<Document::AllTypesDoc>();
-
- add<Value::BSONArrayTest>();
- add<Value::Int>();
- add<Value::Long>();
- add<Value::Double>();
- add<Value::String>();
- add<Value::StringWithNull>();
- add<Value::Date>();
- add<Value::Timestamp>();
- add<Value::EmptyDocument>();
- add<Value::EmptyArray>();
- add<Value::Array>();
- add<Value::Oid>();
- add<Value::Bool>();
- add<Value::Regex>();
- add<Value::Symbol>();
- add<Value::Undefined>();
- add<Value::Null>();
- add<Value::True>();
- add<Value::False>();
- add<Value::MinusOne>();
- add<Value::Zero>();
- add<Value::One>();
-
- add<Value::Coerce::ZeroIntToBool>();
- add<Value::Coerce::NonZeroIntToBool>();
- add<Value::Coerce::ZeroLongToBool>();
- add<Value::Coerce::NonZeroLongToBool>();
- add<Value::Coerce::ZeroDoubleToBool>();
- add<Value::Coerce::NonZeroDoubleToBool>();
- add<Value::Coerce::StringToBool>();
- add<Value::Coerce::ObjectToBool>();
- add<Value::Coerce::ArrayToBool>();
- add<Value::Coerce::DateToBool>();
- add<Value::Coerce::RegexToBool>();
- add<Value::Coerce::TrueToBool>();
- add<Value::Coerce::FalseToBool>();
- add<Value::Coerce::NullToBool>();
- add<Value::Coerce::UndefinedToBool>();
- add<Value::Coerce::IntToInt>();
- add<Value::Coerce::LongToInt>();
- add<Value::Coerce::DoubleToInt>();
- add<Value::Coerce::NullToInt>();
- add<Value::Coerce::UndefinedToInt>();
- add<Value::Coerce::StringToInt>();
- add<Value::Coerce::IntToLong>();
- add<Value::Coerce::LongToLong>();
- add<Value::Coerce::DoubleToLong>();
- add<Value::Coerce::NullToLong>();
- add<Value::Coerce::UndefinedToLong>();
- add<Value::Coerce::StringToLong>();
- add<Value::Coerce::IntToDouble>();
- add<Value::Coerce::LongToDouble>();
- add<Value::Coerce::DoubleToDouble>();
- add<Value::Coerce::NullToDouble>();
- add<Value::Coerce::UndefinedToDouble>();
- add<Value::Coerce::StringToDouble>();
- add<Value::Coerce::DateToDate>();
- add<Value::Coerce::TimestampToDate>();
- add<Value::Coerce::StringToDate>();
- add<Value::Coerce::DoubleToString>();
- add<Value::Coerce::IntToString>();
- add<Value::Coerce::LongToString>();
- add<Value::Coerce::StringToString>();
- add<Value::Coerce::TimestampToString>();
- add<Value::Coerce::DateToString>();
- add<Value::Coerce::NullToString>();
- add<Value::Coerce::UndefinedToString>();
- add<Value::Coerce::DocumentToString>();
- add<Value::Coerce::TimestampToTimestamp>();
- add<Value::Coerce::DateToTimestamp>();
-
- add<Value::GetWidestNumeric>();
- add<Value::AddToBsonObj>();
- add<Value::AddToBsonArray>();
- add<Value::Compare>();
- add<Value::SubFields>();
- add<Value::SerializationOfMissingForSorter>();
+ ASSERT_EQUALS(Value(1), document.getNestedField(FieldPath("a.b")));
+ ASSERT_EQUALS(Value(2), cloneOnDemand.peek().getNestedField(FieldPath("a.b")));
+ ASSERT_EQUALS(DOC("a" << DOC("b" << 1)), document);
+ ASSERT_EQUALS(DOC("a" << DOC("b" << 2)), cloneOnDemand.freeze());
+ }
+};
+
+/** Shallow copy clone of a multi-field Document. */
+class CloneMultipleFields {
+public:
+ void run() {
+ Document document = fromBson(fromjson("{a:1,b:['ra',4],c:{z:1},d:'lal'}"));
+ Document clonedDocument = document.clone();
+ ASSERT_EQUALS(document, clonedDocument);
+ }
+};
+
+/** FieldIterator for an empty Document. */
+class FieldIteratorEmpty {
+public:
+ void run() {
+ FieldIterator iterator((Document()));
+ ASSERT(!iterator.more());
+ }
+};
+
+/** FieldIterator for a single-field Document. */
+class FieldIteratorSingle {
+public:
+ void run() {
+ FieldIterator iterator(fromBson(BSON("a" << 1)));
+ ASSERT(iterator.more());
+ Document::FieldPair field = iterator.next();
+ ASSERT_EQUALS("a", field.first.toString());
+ ASSERT_EQUALS(1, field.second.getInt());
+ ASSERT(!iterator.more());
+ }
+};
+
+/** FieldIterator for a Document with multiple fields. */
+class FieldIteratorMultiple {
+public:
+ void run() {
+ FieldIterator iterator(fromBson(BSON("a" << 1 << "b" << 5.6 << "c"
+ << "z")));
+ ASSERT(iterator.more());
+ Document::FieldPair field = iterator.next();
+ ASSERT_EQUALS("a", field.first.toString());
+ ASSERT_EQUALS(1, field.second.getInt());
+ ASSERT(iterator.more());
+
+ Document::FieldPair field2 = iterator.next();
+ ASSERT_EQUALS("b", field2.first.toString());
+ ASSERT_EQUALS(5.6, field2.second.getDouble());
+ ASSERT(iterator.more());
+
+ Document::FieldPair field3 = iterator.next();
+ ASSERT_EQUALS("c", field3.first.toString());
+ ASSERT_EQUALS("z", field3.second.getString());
+ ASSERT(!iterator.more());
+ }
+};
+
+class AllTypesDoc {
+public:
+ void run() {
+ // These are listed in order of BSONType with some duplicates
+ append("minkey", MINKEY);
+ // EOO not valid in middle of BSONObj
+ append("double", 1.0);
+        append("c-string", "string\0after NUL");  // text after the NUL byte is ignored
+ append("c++", StringData("string\0after NUL", StringData::LiteralTag()).toString());
+ append("StringData", StringData("string\0after NUL", StringData::LiteralTag()));
+ append("emptyObj", BSONObj());
+ append("filledObj", BSON("a" << 1));
+ append("emptyArray", BSON("" << BSONArray()).firstElement());
+ append("filledArray", BSON("" << BSON_ARRAY(1 << "a")).firstElement());
+ append("binData", BSONBinData("a\0b", 3, BinDataGeneral));
+ append("binDataCustom", BSONBinData("a\0b", 3, bdtCustom));
+ append("binDataUUID", BSONBinData("123456789\0abcdef", 16, bdtUUID));
+ append("undefined", BSONUndefined);
+ append("oid", OID());
+ append("true", true);
+ append("false", false);
+ append("date", jsTime());
+ append("null", BSONNULL);
+ append("regex", BSONRegEx(".*"));
+ append("regexFlags", BSONRegEx(".*", "i"));
+ append("regexEmpty", BSONRegEx("", ""));
+ append("dbref", BSONDBRef("foo", OID()));
+ append("code", BSONCode("function() {}"));
+ append("codeNul", BSONCode(StringData("var nul = '\0'", StringData::LiteralTag())));
+ append("symbol", BSONSymbol("foo"));
+ append("symbolNul", BSONSymbol(StringData("f\0o", StringData::LiteralTag())));
+ append("codeWScope", BSONCodeWScope("asdf", BSONObj()));
+ append("codeWScopeWScope", BSONCodeWScope("asdf", BSON("one" << 1)));
+ append("int", 1);
+ append("timestamp", OpTime());
+ append("long", 1LL);
+ append("very long", 1LL << 40);
+ append("maxkey", MAXKEY);
+
+ const BSONArray arr = arrBuilder.arr();
+
+ // can't use append any more since arrBuilder is done
+ objBuilder << "mega array" << arr;
+ docBuilder["mega array"] = mongo::Value(values);
+
+ const BSONObj obj = objBuilder.obj();
+ const Document doc = docBuilder.freeze();
+
+ const BSONObj obj2 = toBson(doc);
+ const Document doc2 = fromBson(obj);
+
+ // logical equality
+ ASSERT_EQUALS(obj, obj2);
+ ASSERT_EQUALS(doc, doc2);
+
+ // binary equality
+ ASSERT_EQUALS(obj.objsize(), obj2.objsize());
+ ASSERT_EQUALS(memcmp(obj.objdata(), obj2.objdata(), obj.objsize()), 0);
+
+ // ensure sorter serialization round-trips correctly
+ BufBuilder bb;
+ doc.serializeForSorter(bb);
+ BufReader reader(bb.buf(), bb.len());
+ const Document doc3 =
+ Document::deserializeForSorter(reader, Document::SorterDeserializeSettings());
+ BSONObj obj3 = toBson(doc3);
+ ASSERT_EQUALS(obj.objsize(), obj3.objsize());
+ ASSERT_EQUALS(memcmp(obj.objdata(), obj3.objdata(), obj.objsize()), 0);
+ }
+
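+ // Append 'thing' under 'name' to the BSONObj, BSONArray, and Document
+ // builders (and the values vector) in lockstep, so every representation
+ // carries the same contents.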
+ template <typename T>
+ void append(const char* name, const T& thing) {
+ objBuilder << name << thing;
+ arrBuilder << thing;
+ docBuilder[name] = mongo::Value(thing);
+ values.push_back(mongo::Value(thing));
+ }
+
+ vector<mongo::Value> values;
+ MutableDocument docBuilder;
+ BSONObjBuilder objBuilder;
+ BSONArrayBuilder arrBuilder;
+};
+} // namespace Document
+
+namespace Value {
+
+using mongo::Value;
+
+BSONObj toBson(const Value& value) {
+ if (value.missing())
+ return BSONObj(); // EOO
+
+ BSONObjBuilder bob;
+ value.addToBsonObj(&bob, "");
+ return bob.obj();
+}
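+// For example (illustrative): toBson(Value(5)) produces the one-field object
+// {"": 5}, while a missing Value produces the empty object, i.e. EOO.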
+
+Value fromBson(const BSONObj& obj) {
+ BSONElement element = obj.firstElement();
+ return Value(element);
+}
+
+void assertRoundTrips(const Value& value1) {
+ BSONObj obj1 = toBson(value1);
+ Value value2 = fromBson(obj1);
+ BSONObj obj2 = toBson(value2);
+ ASSERT_EQUALS(obj1, obj2);
+ ASSERT_EQUALS(value1, value2);
+ ASSERT_EQUALS(value1.getType(), value2.getType());
+}
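+// Usage sketch: assertRoundTrips(Value(5)) checks that a NumberInt survives a
+// Value -> BSON -> Value round trip with both its value and its type intact.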
+
+class BSONArrayTest {
+public:
+ void run() {
+ ASSERT_EQUALS(Value(BSON_ARRAY(1 << 2 << 3)), DOC_ARRAY(1 << 2 << 3));
+ ASSERT_EQUALS(Value(BSONArray()), Value(vector<Value>()));
+ }
+};
+
+/** Int type. */
+class Int {
+public:
+ void run() {
+ Value value = Value(5);
+ ASSERT_EQUALS(5, value.getInt());
+ ASSERT_EQUALS(5, value.getLong());
+ ASSERT_EQUALS(5, value.getDouble());
+ ASSERT_EQUALS(NumberInt, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Long type. */
+class Long {
+public:
+ void run() {
+ Value value = Value(99LL);
+ ASSERT_EQUALS(99, value.getLong());
+ ASSERT_EQUALS(99, value.getDouble());
+ ASSERT_EQUALS(NumberLong, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Double type. */
+class Double {
+public:
+ void run() {
+ Value value = Value(5.5);
+ ASSERT_EQUALS(5.5, value.getDouble());
+ ASSERT_EQUALS(NumberDouble, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** String type. */
+class String {
+public:
+ void run() {
+ Value value = Value("foo");
+ ASSERT_EQUALS("foo", value.getString());
+ ASSERT_EQUALS(mongo::String, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** String with a null character. */
+class StringWithNull {
+public:
+ void run() {
+ string withNull("a\0b", 3);
+ BSONObj objWithNull = BSON("" << withNull);
+ ASSERT_EQUALS(withNull, objWithNull[""].str());
+ Value value = fromBson(objWithNull);
+ ASSERT_EQUALS(withNull, value.getString());
+ assertRoundTrips(value);
+ }
+};
+
+/** Date type. */
+class Date {
+public:
+ void run() {
+ Value value = Value(Date_t(999));
+ ASSERT_EQUALS(999, value.getDate());
+ ASSERT_EQUALS(mongo::Date, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Timestamp type. */
+class Timestamp {
+public:
+ void run() {
+ Value value = Value(OpTime(777));
+ ASSERT(OpTime(777) == value.getTimestamp());
+ ASSERT_EQUALS(mongo::Timestamp, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Document with no fields. */
+class EmptyDocument {
+public:
+ void run() {
+ mongo::Document document = mongo::Document();
+ Value value = Value(document);
+ ASSERT_EQUALS(document.getPtr(), value.getDocument().getPtr());
+ ASSERT_EQUALS(Object, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Document type. */
+class Document {
+public:
+ void run() {
+ mongo::MutableDocument md;
+ md.addField("a", Value(5));
+ md.addField("apple", Value("rrr"));
+ md.addField("banana", Value(-.3));
+ mongo::Document document = md.freeze();
+
+ Value value = Value(document);
+ // Check document pointers are equal.
+ ASSERT_EQUALS(document.getPtr(), value.getDocument().getPtr());
+ // Check document contents.
+ ASSERT_EQUALS(5, document["a"].getInt());
+ ASSERT_EQUALS("rrr", document["apple"].getString());
+ ASSERT_EQUALS(-.3, document["banana"].getDouble());
+ ASSERT_EQUALS(Object, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Array with no elements. */
+class EmptyArray {
+public:
+ void run() {
+ vector<Value> array;
+ Value value(array);
+ const vector<Value>& array2 = value.getArray();
+
+ ASSERT(array2.empty());
+ ASSERT_EQUALS(Array, value.getType());
+ ASSERT_EQUALS(0U, value.getArrayLength());
+ assertRoundTrips(value);
+ }
+};
+
+/** Array type. */
+class Array {
+public:
+ void run() {
+ vector<Value> array;
+ array.push_back(Value(5));
+ array.push_back(Value("lala"));
+ array.push_back(Value(3.14));
+ Value value = Value(array);
+ const vector<Value>& array2 = value.getArray();
+
+ ASSERT(!array2.empty());
+ ASSERT_EQUALS(array2.size(), 3U);
+ ASSERT_EQUALS(5, array2[0].getInt());
+ ASSERT_EQUALS("lala", array2[1].getString());
+ ASSERT_EQUALS(3.14, array2[2].getDouble());
+ ASSERT_EQUALS(mongo::Array, value.getType());
+ ASSERT_EQUALS(3U, value.getArrayLength());
+ assertRoundTrips(value);
+ }
+};
+
+/** Oid type. */
+class Oid {
+public:
+ void run() {
+ Value value = fromBson(BSON("" << OID("abcdefabcdefabcdefabcdef")));
+ ASSERT_EQUALS(OID("abcdefabcdefabcdefabcdef"), value.getOid());
+ ASSERT_EQUALS(jstOID, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Bool type. */
+class Bool {
+public:
+ void run() {
+ Value value = fromBson(BSON("" << true));
+ ASSERT_EQUALS(true, value.getBool());
+ ASSERT_EQUALS(mongo::Bool, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Regex type. */
+class Regex {
+public:
+ void run() {
+ Value value = fromBson(fromjson("{'':/abc/}"));
+ ASSERT_EQUALS(string("abc"), value.getRegex());
+ ASSERT_EQUALS(RegEx, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Symbol type (currently unsupported). */
+class Symbol {
+public:
+ void run() {
+ Value value(BSONSymbol("FOOBAR"));
+ ASSERT_EQUALS("FOOBAR", value.getSymbol());
+ ASSERT_EQUALS(mongo::Symbol, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Undefined type. */
+class Undefined {
+public:
+ void run() {
+ Value value = Value(BSONUndefined);
+ ASSERT_EQUALS(mongo::Undefined, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Null type. */
+class Null {
+public:
+ void run() {
+ Value value = Value(BSONNULL);
+ ASSERT_EQUALS(jstNULL, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** True value. */
+class True {
+public:
+ void run() {
+ Value value = Value(true);
+ ASSERT_EQUALS(true, value.getBool());
+ ASSERT_EQUALS(mongo::Bool, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** False value. */
+class False {
+public:
+ void run() {
+ Value value = Value(false);
+ ASSERT_EQUALS(false, value.getBool());
+ ASSERT_EQUALS(mongo::Bool, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** -1 value. */
+class MinusOne {
+public:
+ void run() {
+ Value value = Value(-1);
+ ASSERT_EQUALS(-1, value.getInt());
+ ASSERT_EQUALS(NumberInt, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** 0 value. */
+class Zero {
+public:
+ void run() {
+ Value value = Value(0);
+ ASSERT_EQUALS(0, value.getInt());
+ ASSERT_EQUALS(NumberInt, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** 1 value. */
+class One {
+public:
+ void run() {
+ Value value = Value(1);
+ ASSERT_EQUALS(1, value.getInt());
+ ASSERT_EQUALS(NumberInt, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+namespace Coerce {
+
+class ToBoolBase {
+public:
+ virtual ~ToBoolBase() {}
+ void run() {
+ ASSERT_EQUALS(expected(), value().coerceToBool());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual bool expected() = 0;
+};
+
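+// Fixtures that pin the expected coercion result; the concrete tests below
+// only need to supply value().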
+class ToBoolTrue : public ToBoolBase {
+ bool expected() {
+ return true;
+ }
+};
+
+class ToBoolFalse : public ToBoolBase {
+ bool expected() {
+ return false;
+ }
+};
+
+/** Coerce 0 to bool. */
+class ZeroIntToBool : public ToBoolFalse {
+ Value value() {
+ return Value(0.0);
+ }
+};
+
+/** Coerce -1 to bool. */
+class NonZeroIntToBool : public ToBoolTrue {
+ Value value() {
+ return Value(-1);
+ }
+};
+
+/** Coerce 0LL to bool. */
+class ZeroLongToBool : public ToBoolFalse {
+ Value value() {
+ return Value(0LL);
+ }
+};
+
+/** Coerce 5LL to bool. */
+class NonZeroLongToBool : public ToBoolTrue {
+ Value value() {
+ return Value(5LL);
+ }
+};
+
+/** Coerce 0.0 to bool. */
+class ZeroDoubleToBool : public ToBoolFalse {
+ Value value() {
+ return Value(0);
+ }
+};
+
+/** Coerce -1.3 to bool. */
+class NonZeroDoubleToBool : public ToBoolTrue {
+ Value value() {
+ return Value(-1.3);
+ }
+};
+
+/** Coerce "" to bool. */
+class StringToBool : public ToBoolTrue {
+ Value value() {
+ return Value("");
+ }
+};
+
+/** Coerce {} to bool. */
+class ObjectToBool : public ToBoolTrue {
+ Value value() {
+ return Value(mongo::Document());
+ }
+};
+
+/** Coerce [] to bool. */
+class ArrayToBool : public ToBoolTrue {
+ Value value() {
+ return Value(vector<Value>());
+ }
+};
+
+/** Coerce Date(0) to bool. */
+class DateToBool : public ToBoolTrue {
+ Value value() {
+ return Value(Date_t(0));
+ }
+};
+
+/** Coerce js literal regex to bool. */
+class RegexToBool : public ToBoolTrue {
+ Value value() {
+ return fromBson(fromjson("{''://}"));
+ }
+};
+
+/** Coerce true to bool. */
+class TrueToBool : public ToBoolTrue {
+ Value value() {
+ return fromBson(BSON("" << true));
+ }
+};
+
+/** Coerce false to bool. */
+class FalseToBool : public ToBoolFalse {
+ Value value() {
+ return fromBson(BSON("" << false));
+ }
+};
+
+/** Coerce null to bool. */
+class NullToBool : public ToBoolFalse {
+ Value value() {
+ return Value(BSONNULL);
+ }
+};
+
+/** Coerce undefined to bool. */
+class UndefinedToBool : public ToBoolFalse {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+};
+
+class ToIntBase {
+public:
+ virtual ~ToIntBase() {}
+ void run() {
+ if (asserts())
+ ASSERT_THROWS(value().coerceToInt(), UserException);
+ else
+ ASSERT_EQUALS(expected(), value().coerceToInt());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual int expected() {
+ return 0;
+ }
+ virtual bool asserts() {
+ return false;
+ }
+};
+
+/** Coerce -5 to int. */
+class IntToInt : public ToIntBase {
+ Value value() {
+ return Value(-5);
+ }
+ int expected() {
+ return -5;
+ }
+};
+
+/** Coerce long to int. */
+class LongToInt : public ToIntBase {
+ Value value() {
+ return Value(0xff00000007LL);
+ }
+ int expected() {
+ return 7;
+ }
+};
+
+/** Coerce 9.8 to int. */
+class DoubleToInt : public ToIntBase {
+ Value value() {
+ return Value(9.8);
+ }
+ int expected() {
+ return 9;
+ }
+};
+
+/** Coerce null to int. */
+class NullToInt : public ToIntBase {
+ Value value() {
+ return Value(BSONNULL);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce undefined to int. */
+class UndefinedToInt : public ToIntBase {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce "" to int unsupported. */
+class StringToInt {
+public:
+ void run() {
+ ASSERT_THROWS(Value("").coerceToInt(), UserException);
+ }
+};
+
+class ToLongBase {
+public:
+ virtual ~ToLongBase() {}
+ void run() {
+ if (asserts())
+ ASSERT_THROWS(value().coerceToLong(), UserException);
+ else
+ ASSERT_EQUALS(expected(), value().coerceToLong());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual long long expected() {
+ return 0;
+ }
+ virtual bool asserts() {
+ return false;
+ }
+};
+
+/** Coerce -5 to long. */
+class IntToLong : public ToLongBase {
+ Value value() {
+ return Value(-5);
+ }
+ long long expected() {
+ return -5;
+ }
+};
+
+/** Coerce long to long. */
+class LongToLong : public ToLongBase {
+ Value value() {
+ return Value(0xff00000007LL);
+ }
+ long long expected() {
+ return 0xff00000007LL;
+ }
+};
+
+/** Coerce 9.8 to long. */
+class DoubleToLong : public ToLongBase {
+ Value value() {
+ return Value(9.8);
+ }
+ long long expected() {
+ return 9;
+ }
+};
+
+/** Coerce null to long. */
+class NullToLong : public ToLongBase {
+ Value value() {
+ return Value(BSONNULL);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce undefined to long. */
+class UndefinedToLong : public ToLongBase {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce string to long unsupported. */
+class StringToLong {
+public:
+ void run() {
+ ASSERT_THROWS(Value("").coerceToLong(), UserException);
+ }
+};
+
+class ToDoubleBase {
+public:
+ virtual ~ToDoubleBase() {}
+ void run() {
+ if (asserts())
+ ASSERT_THROWS(value().coerceToDouble(), UserException);
+ else
+ ASSERT_EQUALS(expected(), value().coerceToDouble());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual double expected() {
+ return 0;
+ }
+ virtual bool asserts() {
+ return false;
+ }
+};
+
+/** Coerce -5 to double. */
+class IntToDouble : public ToDoubleBase {
+ Value value() {
+ return Value(-5);
+ }
+ double expected() {
+ return -5;
+ }
+};
+
+/** Coerce long to double. */
+class LongToDouble : public ToDoubleBase {
+ Value value() {
+ // A long that cannot be exactly represented as a double.
+ return Value(0x7fffffffffffffffLL);
+ }
+ double expected() {
+ return static_cast<double>(0x7fffffffffffffffLL);
+ }
+};
+
+/** Coerce double to double. */
+class DoubleToDouble : public ToDoubleBase {
+ Value value() {
+ return Value(9.8);
+ }
+ double expected() {
+ return 9.8;
+ }
+};
+
+/** Coerce null to double. */
+class NullToDouble : public ToDoubleBase {
+ Value value() {
+ return Value(BSONNULL);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce undefined to double. */
+class UndefinedToDouble : public ToDoubleBase {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce string to double unsupported. */
+class StringToDouble {
+public:
+ void run() {
+ ASSERT_THROWS(Value("").coerceToDouble(), UserException);
+ }
+};
+
+class ToDateBase {
+public:
+ virtual ~ToDateBase() {}
+ void run() {
+ ASSERT_EQUALS(expected(), value().coerceToDate());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual long long expected() = 0;
+};
+
+/** Coerce date to date. */
+class DateToDate : public ToDateBase {
+ Value value() {
+ return Value(Date_t(888));
+ }
+ long long expected() {
+ return 888;
+ }
+};
+
+/**
+ * Convert timestamp to date. This extracts the time portion of the timestamp, which
+ * is different from BSON behavior of interpreting all bytes as a date.
+ */
+class TimestampToDate : public ToDateBase {
+ Value value() {
+ return Value(OpTime(777, 666));
+ }
+ long long expected() {
+ return 777 * 1000;
+ }
+};
+
+/** Coerce string to date unsupported. */
+class StringToDate {
+public:
+ void run() {
+ ASSERT_THROWS(Value("").coerceToDate(), UserException);
+ }
+};
+
+class ToStringBase {
+public:
+ virtual ~ToStringBase() {}
+ void run() {
+ ASSERT_EQUALS(expected(), value().coerceToString());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual string expected() {
+ return "";
+ }
+};
+
+/** Coerce -0.2 to string. */
+class DoubleToString : public ToStringBase {
+ Value value() {
+ return Value(-0.2);
+ }
+ string expected() {
+ return "-0.2";
+ }
+};
+
+/** Coerce -4 to string. */
+class IntToString : public ToStringBase {
+ Value value() {
+ return Value(-4);
+ }
+ string expected() {
+ return "-4";
+ }
+};
+
+/** Coerce 10000LL to string. */
+class LongToString : public ToStringBase {
+ Value value() {
+ return Value(10000LL);
+ }
+ string expected() {
+ return "10000";
+ }
+};
+
+/** Coerce string to string. */
+class StringToString : public ToStringBase {
+ Value value() {
+ return Value("fO_o");
+ }
+ string expected() {
+ return "fO_o";
+ }
+};
+
+/** Coerce timestamp to string. */
+class TimestampToString : public ToStringBase {
+ Value value() {
+ return Value(OpTime(1, 2));
+ }
+ string expected() {
+ return OpTime(1, 2).toStringPretty();
+ }
+};
+
+/** Coerce date to string. */
+class DateToString : public ToStringBase {
+ Value value() {
+ return Value(Date_t(1234567890LL * 1000));
+ }
+ string expected() {
+ return "2009-02-13T23:31:30";
+ } // from js
+};
+
+/** Coerce null to string. */
+class NullToString : public ToStringBase {
+ Value value() {
+ return Value(BSONNULL);
+ }
+};
+
+/** Coerce undefined to string. */
+class UndefinedToString : public ToStringBase {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+};
+
+/** Coerce document to string unsupported. */
+class DocumentToString {
+public:
+ void run() {
+ ASSERT_THROWS(Value(mongo::Document()).coerceToString(), UserException);
+ }
+};
+
+/** Coerce timestamp to timestamp. */
+class TimestampToTimestamp {
+public:
+ void run() {
+ Value value = Value(OpTime(1010));
+ ASSERT(OpTime(1010) == value.coerceToTimestamp());
+ }
+};
+
+/** Coerce date to timestamp unsupported. */
+class DateToTimestamp {
+public:
+ void run() {
+ ASSERT_THROWS(Value(Date_t(1010)).coerceToTimestamp(), UserException);
+ }
+};
+
+} // namespace Coerce
+
+/** Get the "widest" of two numeric types. */
+class GetWidestNumeric {
+public:
+ void run() {
+ using mongo::Undefined;
+
+ // Numeric types.
+ assertWidest(NumberInt, NumberInt, NumberInt);
+ assertWidest(NumberLong, NumberInt, NumberLong);
+ assertWidest(NumberDouble, NumberInt, NumberDouble);
+ assertWidest(NumberLong, NumberLong, NumberLong);
+ assertWidest(NumberDouble, NumberLong, NumberDouble);
+ assertWidest(NumberDouble, NumberDouble, NumberDouble);
+
+ // Missing value and numeric types (result Undefined).
+ assertWidest(Undefined, NumberInt, jstNULL);
+ assertWidest(Undefined, NumberInt, Undefined);
+ assertWidest(Undefined, NumberLong, jstNULL);
+ assertWidest(Undefined, NumberLong, Undefined);
+ assertWidest(Undefined, NumberDouble, jstNULL);
+ assertWidest(Undefined, NumberDouble, Undefined);
+
+ // Missing value types (result Undefined).
+ assertWidest(Undefined, jstNULL, jstNULL);
+ assertWidest(Undefined, jstNULL, Undefined);
+ assertWidest(Undefined, Undefined, Undefined);
+
+ // Other types (result Undefined).
+ assertWidest(Undefined, NumberInt, mongo::Bool);
+ assertWidest(Undefined, mongo::String, NumberDouble);
+ }
+
+private:
+ void assertWidest(BSONType expectedWidest, BSONType a, BSONType b) {
+ ASSERT_EQUALS(expectedWidest, Value::getWidestNumeric(a, b));
+ ASSERT_EQUALS(expectedWidest, Value::getWidestNumeric(b, a));
+ }
+};
+
+/** Add a Value to a BSONObj. */
+class AddToBsonObj {
+public:
+ void run() {
+ BSONObjBuilder bob;
+ Value(4.4).addToBsonObj(&bob, "a");
+ Value(22).addToBsonObj(&bob, "b");
+ Value("astring").addToBsonObj(&bob, "c");
+ ASSERT_EQUALS(BSON("a" << 4.4 << "b" << 22 << "c"
+ << "astring"),
+ bob.obj());
+ }
+};
+
+/** Add a Value to a BSONArray. */
+class AddToBsonArray {
+public:
+ void run() {
+ BSONArrayBuilder bab;
+ Value(4.4).addToBsonArray(&bab);
+ Value(22).addToBsonArray(&bab);
+ Value("astring").addToBsonArray(&bab);
+ ASSERT_EQUALS(BSON_ARRAY(4.4 << 22 << "astring"), bab.arr());
+ }
+};
+
+/** Value comparator. */
+class Compare {
+public:
+ void run() {
+ BSONObjBuilder undefinedBuilder;
+ undefinedBuilder.appendUndefined("");
+ BSONObj undefined = undefinedBuilder.obj();
+
+ // Undefined / null.
+ assertComparison(0, undefined, undefined);
+ assertComparison(-1, undefined, BSON("" << BSONNULL));
+ assertComparison(0, BSON("" << BSONNULL), BSON("" << BSONNULL));
+
+ // Undefined / null with other types.
+ assertComparison(-1, undefined, BSON("" << 1));
+ assertComparison(-1,
+ undefined,
+ BSON(""
+ << "bar"));
+ assertComparison(-1, BSON("" << BSONNULL), BSON("" << -1));
+ assertComparison(-1,
+ BSON("" << BSONNULL),
+ BSON(""
+ << "bar"));
+
+ // Numeric types.
+ assertComparison(0, 5, 5LL);
+ assertComparison(0, -2, -2.0);
+ assertComparison(0, 90LL, 90.0);
+ assertComparison(-1, 5, 6LL);
+ assertComparison(-1, -2, 2.1);
+ assertComparison(1, 90LL, 89.999);
+ assertComparison(-1, 90, 90.1);
+ assertComparison(
+ 0, numeric_limits<double>::quiet_NaN(), numeric_limits<double>::signaling_NaN());
+ assertComparison(-1, numeric_limits<double>::quiet_NaN(), 5);
+
+ // strings compare between numbers and objects
+ assertComparison(1, "abc", 90);
+ assertComparison(-1,
+ "abc",
+ BSON("a"
+ << "b"));
+
+ // String comparison.
+ assertComparison(-1, "", "a");
+ assertComparison(0, "a", "a");
+ assertComparison(-1, "a", "b");
+ assertComparison(-1, "aa", "b");
+ assertComparison(1, "bb", "b");
+ assertComparison(1, "bb", "b");
+ assertComparison(1, "b-", "b");
+ assertComparison(-1, "b-", "ba");
+ // With a null character.
+ assertComparison(1, string("a\0", 2), "a");
+
+ // Object.
+ assertComparison(0, fromjson("{'':{}}"), fromjson("{'':{}}"));
+ assertComparison(0, fromjson("{'':{x:1}}"), fromjson("{'':{x:1}}"));
+ assertComparison(-1, fromjson("{'':{}}"), fromjson("{'':{x:1}}"));
+ assertComparison(-1, fromjson("{'':{'z': 1}}"), fromjson("{'':{'a': 'a'}}"));
+
+ // Array.
+ assertComparison(0, fromjson("{'':[]}"), fromjson("{'':[]}"));
+ assertComparison(-1, fromjson("{'':[0]}"), fromjson("{'':[1]}"));
+ assertComparison(-1, fromjson("{'':[0,0]}"), fromjson("{'':[1]}"));
+ assertComparison(-1, fromjson("{'':[0]}"), fromjson("{'':[0,0]}"));
+ assertComparison(-1, fromjson("{'':[0]}"), fromjson("{'':['']}"));
+
+ // OID.
+ assertComparison(0, OID("abcdefabcdefabcdefabcdef"), OID("abcdefabcdefabcdefabcdef"));
+ assertComparison(1, OID("abcdefabcdefabcdefabcdef"), OID("010101010101010101010101"));
+
+ // Bool.
+ assertComparison(0, true, true);
+ assertComparison(0, false, false);
+ assertComparison(1, true, false);
+
+ // Date.
+ assertComparison(0, Date_t(555), Date_t(555));
+ assertComparison(1, Date_t(555), Date_t(554));
+ // Negative date.
+ assertComparison(1, Date_t(0), Date_t(-1));
+
+ // Regex.
+ assertComparison(0, fromjson("{'':/a/}"), fromjson("{'':/a/}"));
+ assertComparison(-1, fromjson("{'':/a/}"), fromjson("{'':/a/i}"));
+ assertComparison(-1, fromjson("{'':/a/}"), fromjson("{'':/aa/}"));
+
+ // Timestamp.
+ assertComparison(0, OpTime(1234), OpTime(1234));
+ assertComparison(-1, OpTime(4), OpTime(1234));
+
+ // Cross-type comparisons. Listed in order of canonical types.
+ assertComparison(-1, Value(mongo::MINKEY), Value());
+ assertComparison(0, Value(), Value());
+ assertComparison(0, Value(), Value(BSONUndefined));
+ assertComparison(-1, Value(BSONUndefined), Value(BSONNULL));
+ assertComparison(-1, Value(BSONNULL), Value(1));
+ assertComparison(0, Value(1), Value(1LL));
+ assertComparison(0, Value(1), Value(1.0));
+ assertComparison(-1, Value(1), Value("string"));
+ assertComparison(0, Value("string"), Value(BSONSymbol("string")));
+ assertComparison(-1, Value("string"), Value(mongo::Document()));
+ assertComparison(-1, Value(mongo::Document()), Value(vector<Value>()));
+ assertComparison(-1, Value(vector<Value>()), Value(BSONBinData("", 0, MD5Type)));
+ assertComparison(-1, Value(BSONBinData("", 0, MD5Type)), Value(mongo::OID()));
+ assertComparison(-1, Value(mongo::OID()), Value(false));
+ assertComparison(-1, Value(false), Value(Date_t(0)));
+ assertComparison(-1, Value(Date_t(0)), Value(OpTime()));
+ assertComparison(-1, Value(OpTime()), Value(BSONRegEx("")));
+ assertComparison(-1, Value(BSONRegEx("")), Value(BSONDBRef("", mongo::OID())));
+ assertComparison(-1, Value(BSONDBRef("", mongo::OID())), Value(BSONCode("")));
+ assertComparison(-1, Value(BSONCode("")), Value(BSONCodeWScope("", BSONObj())));
+ assertComparison(-1, Value(BSONCodeWScope("", BSONObj())), Value(mongo::MAXKEY));
+ }
+
+private:
+ template <class T, class U>
+ void assertComparison(int expectedResult, const T& a, const U& b) {
+ assertComparison(expectedResult, BSON("" << a), BSON("" << b));
+ }
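+ // Timestamps are appended explicitly via appendTimestamp() so the BSON
+ // elements get the Timestamp type rather than Date.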
+ void assertComparison(int expectedResult, const OpTime& a, const OpTime& b) {
+ BSONObjBuilder first;
+ first.appendTimestamp("", a.asDate());
+ BSONObjBuilder second;
+ second.appendTimestamp("", b.asDate());
+ assertComparison(expectedResult, first.obj(), second.obj());
+ }
+ int sign(int cmp) {
+ if (cmp == 0)
+ return 0;
+ else if (cmp < 0)
+ return -1;
+ else
+ return 1;
+ }
+ int cmp(const Value& a, const Value& b) {
+ return sign(Value::compare(a, b));
+ }
+ void assertComparison(int expectedResult, const BSONObj& a, const BSONObj& b) {
+ assertComparison(expectedResult, fromBson(a), fromBson(b));
+ }
+ void assertComparison(int expectedResult, const Value& a, const Value& b) {
+ mongo::unittest::log() << "testing " << a.toString() << " and " << b.toString() << endl;
+ // reflexivity
+ ASSERT_EQUALS(0, cmp(a, a));
+ ASSERT_EQUALS(0, cmp(b, b));
+
+ // symmetry
+ ASSERT_EQUALS(expectedResult, cmp(a, b));
+ ASSERT_EQUALS(-expectedResult, cmp(b, a));
+
+ if (expectedResult == 0) {
+ // equal values must hash equally.
+ ASSERT_EQUALS(hash(a), hash(b));
+ } else {
+ // unequal values must hash unequally.
+ // (not true in general but we should error if it fails in any of these cases)
+ ASSERT_NOT_EQUALS(hash(a), hash(b));
}
- };
- SuiteInstance<All> myall;
+ // same as BSON
+ ASSERT_EQUALS(expectedResult,
+ sign(toBson(a).firstElement().woCompare(toBson(b).firstElement())));
+ }
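+ // Fold a Value into a fixed seed with hash_combine(); equal values are
+ // required to hash equally (checked above).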
+ size_t hash(const Value& v) {
+ size_t seed = 0xf00ba6;
+ v.hash_combine(seed);
+ return seed;
+ }
+};
+
+class SubFields {
+public:
+ void run() {
+ const Value val = fromBson(fromjson("{'': {a: [{x:1, b:[1, {y:1, c:1234, z:1}, 1]}]}}"));
+ // ^ this outer object is removed by fromBson
+
+ ASSERT(val.getType() == mongo::Object);
+
+ ASSERT(val[999].missing());
+ ASSERT(val["missing"].missing());
+ ASSERT(val["a"].getType() == mongo::Array);
+
+ ASSERT(val["a"][999].missing());
+ ASSERT(val["a"]["missing"].missing());
+ ASSERT(val["a"][0].getType() == mongo::Object);
+
+ ASSERT(val["a"][0][999].missing());
+ ASSERT(val["a"][0]["missing"].missing());
+ ASSERT(val["a"][0]["b"].getType() == mongo::Array);
+
+ ASSERT(val["a"][0]["b"][999].missing());
+ ASSERT(val["a"][0]["b"]["missing"].missing());
+ ASSERT(val["a"][0]["b"][1].getType() == mongo::Object);
+
+ ASSERT(val["a"][0]["b"][1][999].missing());
+ ASSERT(val["a"][0]["b"][1]["missing"].missing());
+ ASSERT(val["a"][0]["b"][1]["c"].getType() == mongo::NumberInt);
+ ASSERT_EQUALS(val["a"][0]["b"][1]["c"].getInt(), 1234);
+ }
+};
+
+
+class SerializationOfMissingForSorter {
+ // Can't be tested in AllTypesDoc since missing values are omitted when adding to BSON.
+public:
+ void run() {
+ const Value missing;
+ const Value arrayOfMissing = Value(vector<Value>(10));
+
+ BufBuilder bb;
+ missing.serializeForSorter(bb);
+ arrayOfMissing.serializeForSorter(bb);
+
+ BufReader reader(bb.buf(), bb.len());
+ ASSERT_EQUALS(missing,
+ Value::deserializeForSorter(reader, Value::SorterDeserializeSettings()));
+ ASSERT_EQUALS(arrayOfMissing,
+ Value::deserializeForSorter(reader, Value::SorterDeserializeSettings()));
+ }
+};
+} // namespace Value
+
+class All : public Suite {
+public:
+ All() : Suite("document") {}
+ void setupTests() {
+ add<Document::Create>();
+ add<Document::CreateFromBsonObj>();
+ add<Document::AddField>();
+ add<Document::GetValue>();
+ add<Document::SetField>();
+ add<Document::Compare>();
+ add<Document::Clone>();
+ add<Document::CloneMultipleFields>();
+ add<Document::FieldIteratorEmpty>();
+ add<Document::FieldIteratorSingle>();
+ add<Document::FieldIteratorMultiple>();
+ add<Document::AllTypesDoc>();
+
+ add<Value::BSONArrayTest>();
+ add<Value::Int>();
+ add<Value::Long>();
+ add<Value::Double>();
+ add<Value::String>();
+ add<Value::StringWithNull>();
+ add<Value::Date>();
+ add<Value::Timestamp>();
+ add<Value::EmptyDocument>();
+ add<Value::EmptyArray>();
+ add<Value::Array>();
+ add<Value::Oid>();
+ add<Value::Bool>();
+ add<Value::Regex>();
+ add<Value::Symbol>();
+ add<Value::Undefined>();
+ add<Value::Null>();
+ add<Value::True>();
+ add<Value::False>();
+ add<Value::MinusOne>();
+ add<Value::Zero>();
+ add<Value::One>();
+
+ add<Value::Coerce::ZeroIntToBool>();
+ add<Value::Coerce::NonZeroIntToBool>();
+ add<Value::Coerce::ZeroLongToBool>();
+ add<Value::Coerce::NonZeroLongToBool>();
+ add<Value::Coerce::ZeroDoubleToBool>();
+ add<Value::Coerce::NonZeroDoubleToBool>();
+ add<Value::Coerce::StringToBool>();
+ add<Value::Coerce::ObjectToBool>();
+ add<Value::Coerce::ArrayToBool>();
+ add<Value::Coerce::DateToBool>();
+ add<Value::Coerce::RegexToBool>();
+ add<Value::Coerce::TrueToBool>();
+ add<Value::Coerce::FalseToBool>();
+ add<Value::Coerce::NullToBool>();
+ add<Value::Coerce::UndefinedToBool>();
+ add<Value::Coerce::IntToInt>();
+ add<Value::Coerce::LongToInt>();
+ add<Value::Coerce::DoubleToInt>();
+ add<Value::Coerce::NullToInt>();
+ add<Value::Coerce::UndefinedToInt>();
+ add<Value::Coerce::StringToInt>();
+ add<Value::Coerce::IntToLong>();
+ add<Value::Coerce::LongToLong>();
+ add<Value::Coerce::DoubleToLong>();
+ add<Value::Coerce::NullToLong>();
+ add<Value::Coerce::UndefinedToLong>();
+ add<Value::Coerce::StringToLong>();
+ add<Value::Coerce::IntToDouble>();
+ add<Value::Coerce::LongToDouble>();
+ add<Value::Coerce::DoubleToDouble>();
+ add<Value::Coerce::NullToDouble>();
+ add<Value::Coerce::UndefinedToDouble>();
+ add<Value::Coerce::StringToDouble>();
+ add<Value::Coerce::DateToDate>();
+ add<Value::Coerce::TimestampToDate>();
+ add<Value::Coerce::StringToDate>();
+ add<Value::Coerce::DoubleToString>();
+ add<Value::Coerce::IntToString>();
+ add<Value::Coerce::LongToString>();
+ add<Value::Coerce::StringToString>();
+ add<Value::Coerce::TimestampToString>();
+ add<Value::Coerce::DateToString>();
+ add<Value::Coerce::NullToString>();
+ add<Value::Coerce::UndefinedToString>();
+ add<Value::Coerce::DocumentToString>();
+ add<Value::Coerce::TimestampToTimestamp>();
+ add<Value::Coerce::DateToTimestamp>();
+
+ add<Value::GetWidestNumeric>();
+ add<Value::AddToBsonObj>();
+ add<Value::AddToBsonArray>();
+ add<Value::Compare>();
+ add<Value::SubFields>();
+ add<Value::SerializationOfMissingForSorter>();
+ }
+};
+
+SuiteInstance<All> myall;
-} // namespace DocumentTests
+} // namespace DocumentTests
diff --git a/src/mongo/dbtests/executor_registry.cpp b/src/mongo/dbtests/executor_registry.cpp
index d526f5389ae..9dc2675fb63 100644
--- a/src/mongo/dbtests/executor_registry.cpp
+++ b/src/mongo/dbtests/executor_registry.cpp
@@ -47,294 +47,300 @@
namespace ExecutorRegistry {
- using std::auto_ptr;
-
- class ExecutorRegistryBase {
- public:
- ExecutorRegistryBase()
- : _client(&_opCtx)
- {
- _ctx.reset(new Client::WriteContext(&_opCtx, ns()));
- _client.dropCollection(ns());
-
- for (int i = 0; i < N(); ++i) {
- _client.insert(ns(), BSON("foo" << i));
- }
- }
+using std::auto_ptr;
- /**
- * Return a plan executor that is going over the collection in ns().
- */
- PlanExecutor* getCollscan() {
- auto_ptr<WorkingSet> ws(new WorkingSet());
- CollectionScanParams params;
- params.collection = collection();
- params.direction = CollectionScanParams::FORWARD;
- params.tailable = false;
- auto_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, ws.get(), NULL));
-
- // Create a plan executor to hold it
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns(), BSONObj(), &cq).isOK());
- PlanExecutor* exec;
- // Takes ownership of 'ws', 'scan', and 'cq'.
- Status status = PlanExecutor::make(&_opCtx,
- ws.release(),
- scan.release(),
- cq,
- _ctx->ctx().db()->getCollection(ns()),
- PlanExecutor::YIELD_MANUAL,
- &exec);
- ASSERT_OK(status);
- return exec;
- }
+class ExecutorRegistryBase {
+public:
+ ExecutorRegistryBase() : _client(&_opCtx) {
+ _ctx.reset(new Client::WriteContext(&_opCtx, ns()));
+ _client.dropCollection(ns());
- void registerExecutor( PlanExecutor* exec ) {
- WriteUnitOfWork wuow(&_opCtx);
- _ctx->ctx().db()->getOrCreateCollection(&_opCtx, ns())
- ->getCursorManager()
- ->registerExecutor(exec);
- wuow.commit();
+ for (int i = 0; i < N(); ++i) {
+ _client.insert(ns(), BSON("foo" << i));
}
-
- void deregisterExecutor( PlanExecutor* exec ) {
- WriteUnitOfWork wuow(&_opCtx);
- _ctx->ctx().db()->getOrCreateCollection(&_opCtx, ns())
- ->getCursorManager()
- ->deregisterExecutor(exec);
- wuow.commit();
+ }
+
+ /**
+ * Return a plan executor that is going over the collection in ns().
+ */
+ PlanExecutor* getCollscan() {
+ auto_ptr<WorkingSet> ws(new WorkingSet());
+ CollectionScanParams params;
+ params.collection = collection();
+ params.direction = CollectionScanParams::FORWARD;
+ params.tailable = false;
+ auto_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, ws.get(), NULL));
+
+ // Create a plan executor to hold it
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(ns(), BSONObj(), &cq).isOK());
+ PlanExecutor* exec;
+ // Takes ownership of 'ws', 'scan', and 'cq'.
+ Status status = PlanExecutor::make(&_opCtx,
+ ws.release(),
+ scan.release(),
+ cq,
+ _ctx->ctx().db()->getCollection(ns()),
+ PlanExecutor::YIELD_MANUAL,
+ &exec);
+ ASSERT_OK(status);
+ return exec;
+ }
+
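+ // Register the executor with the collection's CursorManager so it is
+ // notified of invalidations while it is saved/yielded.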
+ void registerExecutor(PlanExecutor* exec) {
+ WriteUnitOfWork wuow(&_opCtx);
+ _ctx->ctx()
+ .db()
+ ->getOrCreateCollection(&_opCtx, ns())
+ ->getCursorManager()
+ ->registerExecutor(exec);
+ wuow.commit();
+ }
+
+ void deregisterExecutor(PlanExecutor* exec) {
+ WriteUnitOfWork wuow(&_opCtx);
+ _ctx->ctx()
+ .db()
+ ->getOrCreateCollection(&_opCtx, ns())
+ ->getCursorManager()
+ ->deregisterExecutor(exec);
+ wuow.commit();
+ }
+
+ int N() {
+ return 50;
+ }
+
+ Collection* collection() {
+ return _ctx->ctx().db()->getCollection(ns());
+ }
+
+ static const char* ns() {
+ return "unittests.ExecutorRegistryDiskLocInvalidation";
+ }
+
+ // Order of these is important for initialization
+ OperationContextImpl _opCtx;
+ auto_ptr<Client::WriteContext> _ctx;
+ DBDirectClient _client;
+};
+
+
+// Test that a registered runner receives invalidation notifications.
+class ExecutorRegistryDiskLocInvalid : public ExecutorRegistryBase {
+public:
+ void run() {
+ if (supportsDocLocking()) {
+ return;
}
- int N() { return 50; }
+ auto_ptr<PlanExecutor> run(getCollscan());
+ BSONObj obj;
- Collection* collection() {
- return _ctx->ctx().db()->getCollection( ns() );
+ // Read some of it.
+ for (int i = 0; i < 10; ++i) {
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
}
- static const char* ns() { return "unittests.ExecutorRegistryDiskLocInvalidation"; }
-
- // Order of these is important for initialization
- OperationContextImpl _opCtx;
- auto_ptr<Client::WriteContext> _ctx;
- DBDirectClient _client;
- };
-
+ // Register it.
+ run->saveState();
+ registerExecutor(run.get());
+ // At this point it's safe to yield. forceYield would do that. Let's now simulate some
+ // stuff going on in the yield.
- // Test that a registered runner receives invalidation notifications.
- class ExecutorRegistryDiskLocInvalid : public ExecutorRegistryBase {
- public:
- void run() {
- if ( supportsDocLocking() ) {
- return;
- }
+ // Delete some data, namely the next 2 things we'd expect.
+ _client.remove(ns(), BSON("foo" << 10));
+ _client.remove(ns(), BSON("foo" << 11));
- auto_ptr<PlanExecutor> run(getCollscan());
- BSONObj obj;
+ // At this point, we're done yielding. We recover our lock.
- // Read some of it.
- for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
- ASSERT_EQUALS(i, obj["foo"].numberInt());
- }
+ // Unregister the runner.
+ deregisterExecutor(run.get());
- // Register it.
- run->saveState();
- registerExecutor(run.get());
- // At this point it's safe to yield. forceYield would do that. Let's now simulate some
- // stuff going on in the yield.
+ // And clean up anything that happened before.
+ run->restoreState(&_opCtx);
- // Delete some data, namely the next 2 things we'd expect.
- _client.remove(ns(), BSON("foo" << 10));
- _client.remove(ns(), BSON("foo" << 11));
-
- // At this point, we're done yielding. We recover our lock.
-
- // Unregister the runner.
- deregisterExecutor(run.get());
-
- // And clean up anything that happened before.
- run->restoreState(&_opCtx);
-
- // Make sure that the runner moved forward over the deleted data. We don't see foo==10
- // or foo==11.
- for (int i = 12; i < N(); ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
- ASSERT_EQUALS(i, obj["foo"].numberInt());
- }
-
- ASSERT_EQUALS(PlanExecutor::IS_EOF, run->getNext(&obj, NULL));
+ // Make sure that the runner moved forward over the deleted data. We don't see foo==10
+ // or foo==11.
+ for (int i = 12; i < N(); ++i) {
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
}
- };
- // Test that registered runners are killed when their collection is dropped.
- class ExecutorRegistryDropCollection : public ExecutorRegistryBase {
- public:
- void run() {
- auto_ptr<PlanExecutor> run(getCollscan());
- BSONObj obj;
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, run->getNext(&obj, NULL));
+ }
+};
- // Read some of it.
- for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
- ASSERT_EQUALS(i, obj["foo"].numberInt());
- }
-
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
-
- // Drop a collection that's not ours.
- _client.dropCollection("unittests.someboguscollection");
-
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState(&_opCtx);
+// Test that registered runners are killed when their collection is dropped.
+class ExecutorRegistryDropCollection : public ExecutorRegistryBase {
+public:
+ void run() {
+ auto_ptr<PlanExecutor> run(getCollscan());
+ BSONObj obj;
+ // Read some of it.
+ for (int i = 0; i < 10; ++i) {
ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
- ASSERT_EQUALS(10, obj["foo"].numberInt());
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
+ }
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
+ // Save state and register.
+ run->saveState();
+ registerExecutor(run.get());
- // Drop our collection.
- _client.dropCollection(ns());
+ // Drop a collection that's not ours.
+ _client.dropCollection("unittests.someboguscollection");
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState(&_opCtx);
+ // Unregister and restore state.
+ deregisterExecutor(run.get());
+ run->restoreState(&_opCtx);
- // PlanExecutor was killed.
- ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
- }
- };
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(10, obj["foo"].numberInt());
- // Test that registered runners are killed when all indices are dropped on the collection.
- class ExecutorRegistryDropAllIndices : public ExecutorRegistryBase {
- public:
- void run() {
- auto_ptr<PlanExecutor> run(getCollscan());
- BSONObj obj;
+ // Save state and register.
+ run->saveState();
+ registerExecutor(run.get());
- ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("foo" << 1)));
+ // Drop our collection.
+ _client.dropCollection(ns());
- // Read some of it.
- for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
- ASSERT_EQUALS(i, obj["foo"].numberInt());
- }
+ // Unregister and restore state.
+ deregisterExecutor(run.get());
+ run->restoreState(&_opCtx);
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
+ // PlanExecutor was killed.
+ ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
+ }
+};
- // Drop all indices.
- _client.dropIndexes(ns());
+// Test that registered runners are killed when all indices are dropped on the collection.
+class ExecutorRegistryDropAllIndices : public ExecutorRegistryBase {
+public:
+ void run() {
+ auto_ptr<PlanExecutor> run(getCollscan());
+ BSONObj obj;
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState(&_opCtx);
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("foo" << 1)));
- // PlanExecutor was killed.
- ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
+ // Read some of it.
+ for (int i = 0; i < 10; ++i) {
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
}
- };
- // Test that registered runners are killed when an index is dropped on the collection.
- class ExecutorRegistryDropOneIndex : public ExecutorRegistryBase {
- public:
- void run() {
- auto_ptr<PlanExecutor> run(getCollscan());
- BSONObj obj;
+ // Save state and register.
+ run->saveState();
+ registerExecutor(run.get());
- ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("foo" << 1)));
+ // Drop all indices.
+ _client.dropIndexes(ns());
- // Read some of it.
- for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
- ASSERT_EQUALS(i, obj["foo"].numberInt());
- }
+ // Unregister and restore state.
+ deregisterExecutor(run.get());
+ run->restoreState(&_opCtx);
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
+ // PlanExecutor was killed.
+ ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
+ }
+};
- // Drop a specific index.
- _client.dropIndex(ns(), BSON("foo" << 1));
+// Test that registered runners are killed when an index is dropped on the collection.
+class ExecutorRegistryDropOneIndex : public ExecutorRegistryBase {
+public:
+ void run() {
+ auto_ptr<PlanExecutor> run(getCollscan());
+ BSONObj obj;
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState(&_opCtx);
-
- // PlanExecutor was killed.
- ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
- }
- };
-
- // Test that registered runners are killed when their database is dropped.
- class ExecutorRegistryDropDatabase : public ExecutorRegistryBase {
- public:
- void run() {
- auto_ptr<PlanExecutor> run(getCollscan());
- BSONObj obj;
-
- // Read some of it.
- for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
- ASSERT_EQUALS(i, obj["foo"].numberInt());
- }
-
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
-
- // Drop a DB that's not ours. We can't have a lock at all to do this as dropping a DB
- // requires a "global write lock."
- _ctx.reset();
- _client.dropDatabase("somesillydb");
- _ctx.reset(new Client::WriteContext(&_opCtx, ns()));
-
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState(&_opCtx);
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("foo" << 1)));
+ // Read some of it.
+ for (int i = 0; i < 10; ++i) {
ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
- ASSERT_EQUALS(10, obj["foo"].numberInt());
-
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
+ }
- // Drop our DB. Once again, must give up the lock.
- _ctx.reset();
- _client.dropDatabase("unittests");
- _ctx.reset(new Client::WriteContext(&_opCtx, ns()));
+ // Save state and register.
+ run->saveState();
+ registerExecutor(run.get());
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState(&_opCtx);
- _ctx.reset();
+ // Drop a specific index.
+ _client.dropIndex(ns(), BSON("foo" << 1));
- // PlanExecutor was killed.
- ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
- }
- };
+ // Unregister and restore state.
+ deregisterExecutor(run.get());
+ run->restoreState(&_opCtx);
- // TODO: Test that this works with renaming a collection.
+ // PlanExecutor was killed.
+ ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
+ }
+};
- class All : public Suite {
- public:
- All() : Suite( "executor_registry" ) { }
+// Test that registered runners are killed when their database is dropped.
+class ExecutorRegistryDropDatabase : public ExecutorRegistryBase {
+public:
+ void run() {
+ auto_ptr<PlanExecutor> run(getCollscan());
+ BSONObj obj;
- void setupTests() {
- add<ExecutorRegistryDiskLocInvalid>();
- add<ExecutorRegistryDropCollection>();
- add<ExecutorRegistryDropAllIndices>();
- add<ExecutorRegistryDropOneIndex>();
- add<ExecutorRegistryDropDatabase>();
+ // Read some of it.
+ for (int i = 0; i < 10; ++i) {
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
}
- };
- SuiteInstance<All> executorRegistryAll;
+ // Save state and register.
+ run->saveState();
+ registerExecutor(run.get());
+
+ // Drop a DB that's not ours. We can't have a lock at all to do this as dropping a DB
+ // requires a "global write lock."
+ _ctx.reset();
+ _client.dropDatabase("somesillydb");
+ _ctx.reset(new Client::WriteContext(&_opCtx, ns()));
+
+ // Unregister and restore state.
+ deregisterExecutor(run.get());
+ run->restoreState(&_opCtx);
+
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(10, obj["foo"].numberInt());
+
+ // Save state and register.
+ run->saveState();
+ registerExecutor(run.get());
+
+ // Drop our DB. Once again, must give up the lock.
+ _ctx.reset();
+ _client.dropDatabase("unittests");
+ _ctx.reset(new Client::WriteContext(&_opCtx, ns()));
+
+ // Unregister and restore state.
+ deregisterExecutor(run.get());
+ run->restoreState(&_opCtx);
+ _ctx.reset();
+
+ // PlanExecutor was killed.
+ ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
+ }
+};
+
+// TODO: Test that this works with renaming a collection.
+
+class All : public Suite {
+public:
+ All() : Suite("executor_registry") {}
+
+ void setupTests() {
+ add<ExecutorRegistryDiskLocInvalid>();
+ add<ExecutorRegistryDropCollection>();
+ add<ExecutorRegistryDropAllIndices>();
+ add<ExecutorRegistryDropOneIndex>();
+ add<ExecutorRegistryDropDatabase>();
+ }
+};
+
+SuiteInstance<All> executorRegistryAll;
} // namespace ExecutorRegistry
diff --git a/src/mongo/dbtests/expressiontests.cpp b/src/mongo/dbtests/expressiontests.cpp
index a1c9fd3a7e1..64860cc04fa 100644
--- a/src/mongo/dbtests/expressiontests.cpp
+++ b/src/mongo/dbtests/expressiontests.cpp
@@ -36,3718 +36,4308 @@
namespace ExpressionTests {
- using boost::intrusive_ptr;
- using std::numeric_limits;
- using std::set;
- using std::string;
- using std::vector;
-
- /** Convert BSONObj to a BSONObj with our $const wrappings. */
- static BSONObj constify(const BSONObj& obj, bool parentIsArray=false) {
- BSONObjBuilder bob;
- for (BSONObjIterator itr(obj); itr.more(); itr.next()) {
- BSONElement elem = *itr;
- if (elem.type() == Object) {
- bob << elem.fieldName() << constify(elem.Obj(), false);
- }
- else if (elem.type() == Array && !parentIsArray) {
- // arrays within arrays are treated as constant values by the real parser
- bob << elem.fieldName() << BSONArray(constify(elem.Obj(), true));
- }
- else if (str::equals(elem.fieldName(), "$const") ||
- (elem.type() == mongo::String && elem.valuestrsafe()[0] == '$')) {
- bob.append(elem);
- }
- else {
- bob.append(elem.fieldName(), BSON("$const" << elem));
- }
+using boost::intrusive_ptr;
+using std::numeric_limits;
+using std::set;
+using std::string;
+using std::vector;
+
+/** Convert BSONObj to a BSONObj with our $const wrappings. */
+static BSONObj constify(const BSONObj& obj, bool parentIsArray = false) {
+ BSONObjBuilder bob;
+ for (BSONObjIterator itr(obj); itr.more(); itr.next()) {
+ BSONElement elem = *itr;
+ if (elem.type() == Object) {
+ bob << elem.fieldName() << constify(elem.Obj(), false);
+ } else if (elem.type() == Array && !parentIsArray) {
+ // arrays within arrays are treated as constant values by the real parser
+ bob << elem.fieldName() << BSONArray(constify(elem.Obj(), true));
+ } else if (str::equals(elem.fieldName(), "$const") ||
+ (elem.type() == mongo::String && elem.valuestrsafe()[0] == '$')) {
+ bob.append(elem);
+ } else {
+ bob.append(elem.fieldName(), BSON("$const" << elem));
}
- return bob.obj();
+ }
+ return bob.obj();
+}
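+// For example (illustrative): constify(BSON("a" << 5)) yields {a: {$const: 5}},
+// while field-path strings such as "$b" and explicit $const wrappers pass
+// through unchanged, matching what the real expression parser produces.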
+
+/** Check binary equality, ensuring use of the same numeric types. */
+static void assertBinaryEqual(const BSONObj& expected, const BSONObj& actual) {
+ ASSERT_EQUALS(expected, actual);
+ ASSERT(expected.binaryEqual(actual));
+}
+
+/** Convert Value to a wrapped BSONObj with an empty string field name. */
+static BSONObj toBson(const Value& value) {
+ BSONObjBuilder bob;
+ value.addToBsonObj(&bob, "");
+ return bob.obj();
+}
+
+/** Convert Expression to BSON. */
+static BSONObj expressionToBson(const intrusive_ptr<Expression>& expression) {
+ return BSON("" << expression->serialize(false)).firstElement().embeddedObject().getOwned();
+}
+
+/** Convert Document to BSON. */
+static BSONObj toBson(const Document& document) {
+ return document.toBson();
+}
+
+/** Create a Document from a BSONObj. */
+Document fromBson(BSONObj obj) {
+ return Document(obj);
+}
+
+/** Create a Value from a BSONObj. */
+Value valueFromBson(BSONObj obj) {
+ BSONElement element = obj.firstElement();
+ return Value(element);
+}
+
+namespace Add {
+
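+// Base fixture for $add: subclasses supply the operands and the expected sum.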
+class ExpectedResultBase {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
+ populateOperands(expression);
+ ASSERT_EQUALS(expectedResult(), toBson(expression->evaluate(Document())));
}
- /** Check binary equality, ensuring use of the same numeric types. */
- static void assertBinaryEqual( const BSONObj& expected, const BSONObj& actual ) {
- ASSERT_EQUALS( expected, actual );
- ASSERT( expected.binaryEqual( actual ) );
+protected:
+ virtual void populateOperands(intrusive_ptr<ExpressionNary>& expression) = 0;
+ virtual BSONObj expectedResult() = 0;
+};
+
+/** $add with a NULL Document pointer, as called by ExpressionNary::optimize(). */
+class NullDocument {
+public:
+ void run() {
+ intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
+ expression->addOperand(ExpressionConstant::create(Value(2)));
+ ASSERT_EQUALS(BSON("" << 2), toBson(expression->evaluate(Document())));
}
+};
- /** Convert Value to a wrapped BSONObj with an empty string field name. */
- static BSONObj toBson( const Value& value ) {
- BSONObjBuilder bob;
- value.addToBsonObj( &bob, "" );
- return bob.obj();
+/** $add without operands. */
+class NoOperands : public ExpectedResultBase {
+ void populateOperands(intrusive_ptr<ExpressionNary>& expression) {}
+ virtual BSONObj expectedResult() {
+ return BSON("" << 0);
}
+};
+
+/** String type unsupported. */
+class String {
+public:
+ void run() {
+ intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
+ expression->addOperand(ExpressionConstant::create(Value("a")));
+ ASSERT_THROWS(expression->evaluate(Document()), UserException);
+ }
+};
+
+/** Bool type unsupported. */
+class Bool {
+public:
+ void run() {
+ intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
+ expression->addOperand(ExpressionConstant::create(Value(true)));
+ ASSERT_THROWS(expression->evaluate(Document()), UserException);
+ }
+};
- /** Convert Expression to BSON. */
- static BSONObj expressionToBson( const intrusive_ptr<Expression>& expression ) {
- return BSON("" << expression->serialize(false)).firstElement().embeddedObject().getOwned();
+class SingleOperandBase : public ExpectedResultBase {
+ void populateOperands(intrusive_ptr<ExpressionNary>& expression) {
+ expression->addOperand(ExpressionConstant::create(valueFromBson(operand())));
}
+ BSONObj expectedResult() {
+ return operand();
+ }
+
+protected:
+ virtual BSONObj operand() = 0;
+};
- /** Convert Document to BSON. */
- static BSONObj toBson( const Document& document ) {
- return document.toBson();
+/** Single int argument. */
+class Int : public SingleOperandBase {
+ BSONObj operand() {
+ return BSON("" << 1);
}
-
- /** Create a Document from a BSONObj. */
- Document fromBson( BSONObj obj ) {
- return Document(obj);
+};
+
+/** Single long argument. */
+class Long : public SingleOperandBase {
+ BSONObj operand() {
+ return BSON("" << 5555LL);
}
+};
- /** Create a Value from a BSONObj. */
- Value valueFromBson( BSONObj obj ) {
- BSONElement element = obj.firstElement();
- return Value( element );
+/** Single double argument. */
+class Double : public SingleOperandBase {
+ BSONObj operand() {
+ return BSON("" << 99.99);
}
-
- namespace Add {
+};
- class ExpectedResultBase {
- public:
- virtual ~ExpectedResultBase() {}
- void run() {
- intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
- populateOperands( expression );
- ASSERT_EQUALS( expectedResult(),
- toBson( expression->evaluate( Document() ) ) );
- }
- protected:
- virtual void populateOperands( intrusive_ptr<ExpressionNary>& expression ) = 0;
- virtual BSONObj expectedResult() = 0;
- };
-
- /** $add with a NULL Document pointer, as called by ExpressionNary::optimize(). */
- class NullDocument {
- public:
- void run() {
- intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
- expression->addOperand( ExpressionConstant::create( Value( 2 ) ) );
- ASSERT_EQUALS( BSON( "" << 2 ), toBson( expression->evaluate( Document() ) ) );
- }
- };
-
- /** $add without operands. */
- class NoOperands : public ExpectedResultBase {
- void populateOperands( intrusive_ptr<ExpressionNary>& expression ) {}
- virtual BSONObj expectedResult() { return BSON( "" << 0 ); }
- };
-
- /** String type unsupported. */
- class String {
- public:
- void run() {
- intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
- expression->addOperand( ExpressionConstant::create( Value( "a" ) ) );
- ASSERT_THROWS( expression->evaluate( Document() ), UserException );
- }
- };
-
- /** Bool type unsupported. */
- class Bool {
- public:
- void run() {
- intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
- expression->addOperand( ExpressionConstant::create( Value(true) ) );
- ASSERT_THROWS( expression->evaluate( Document() ), UserException );
- }
- };
-
- class SingleOperandBase : public ExpectedResultBase {
- void populateOperands( intrusive_ptr<ExpressionNary>& expression ) {
- expression->addOperand( ExpressionConstant::create( valueFromBson( operand() ) ) );
- }
- BSONObj expectedResult() { return operand(); }
- protected:
- virtual BSONObj operand() = 0;
- };
-
- /** Single int argument. */
- class Int : public SingleOperandBase {
- BSONObj operand() { return BSON( "" << 1 ); }
- };
-
- /** Single long argument. */
- class Long : public SingleOperandBase {
- BSONObj operand() { return BSON( "" << 5555LL ); }
- };
-
- /** Single double argument. */
- class Double : public SingleOperandBase {
- BSONObj operand() { return BSON( "" << 99.99 ); }
- };
-
- /** Single Date argument. */
- class Date : public SingleOperandBase {
- BSONObj operand() { return BSON( "" << Date_t(12345) ); }
- };
-
- /** Single null argument. */
- class Null : public SingleOperandBase {
- BSONObj operand() { return BSON( "" << BSONNULL ); }
- BSONObj expectedResult() { return BSON( "" << BSONNULL ); }
- };
-
- /** Single undefined argument. */
- class Undefined : public SingleOperandBase {
- BSONObj operand() { return fromjson( "{'':undefined}" ); }
- BSONObj expectedResult() { return BSON( "" << BSONNULL ); }
- };
-
- class TwoOperandBase : public ExpectedResultBase {
- public:
- TwoOperandBase() :
- _reverse() {
- }
- void run() {
- ExpectedResultBase::run();
- // Now add the operands in the reverse direction.
- _reverse = true;
- ExpectedResultBase::run();
- }
- protected:
- void populateOperands( intrusive_ptr<ExpressionNary>& expression ) {
- expression->addOperand( ExpressionConstant::create
- ( valueFromBson( _reverse ? operand2() : operand1() ) ) );
- expression->addOperand( ExpressionConstant::create
- ( valueFromBson( _reverse ? operand1() : operand2() ) ) );
- }
- virtual BSONObj operand1() = 0;
- virtual BSONObj operand2() = 0;
- private:
- bool _reverse;
- };
-
- /** Add two ints. */
- class IntInt : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << 1 ); }
- BSONObj operand2() { return BSON( "" << 5 ); }
- BSONObj expectedResult() { return BSON( "" << 6 ); }
- };
-
- /** Adding two large ints produces a long, not an overflowed int. */
- class IntIntNoOverflow : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << numeric_limits<int>::max() ); }
- BSONObj operand2() { return BSON( "" << numeric_limits<int>::max() ); }
- BSONObj expectedResult() {
- return BSON( "" << ( (long long)( numeric_limits<int>::max() ) +
- numeric_limits<int>::max() ) );
- }
- };
-
- /** Adding an int and a long produces a long. */
- class IntLong : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << 1 ); }
- BSONObj operand2() { return BSON( "" << 9LL ); }
- BSONObj expectedResult() { return BSON( "" << 10LL ); }
- };
-
- /** Adding an int and a long overflows. */
- class IntLongOverflow : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << numeric_limits<int>::max() ); }
- BSONObj operand2() { return BSON( "" << numeric_limits<long long>::max() ); }
- BSONObj expectedResult() { return BSON( "" << ( numeric_limits<int>::max()
- + numeric_limits<long long>::max() ) ); }
- };
-
- /** Adding an int and a double produces a double. */
- class IntDouble : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << 9 ); }
- BSONObj operand2() { return BSON( "" << 1.1 ); }
- BSONObj expectedResult() { return BSON( "" << 10.1 ); }
- };
-
- /** Adding an int and a Date produces a Date. */
- class IntDate : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << 6 ); }
- BSONObj operand2() { return BSON( "" << Date_t(123450) ); }
- BSONObj expectedResult() { return BSON( "" << Date_t(123456) ); }
- };
-
- /** Adding a long and a double produces a double. */
- class LongDouble : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << 9LL ); }
- BSONObj operand2() { return BSON( "" << 1.1 ); }
- BSONObj expectedResult() { return BSON( "" << 10.1 ); }
- };
-
- /** Adding a long and a double does not overflow. */
- class LongDoubleNoOverflow : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << numeric_limits<long long>::max() ); }
- BSONObj operand2() { return BSON( "" << double( numeric_limits<long long>::max() ) ); }
- BSONObj expectedResult() {
- return BSON( "" << numeric_limits<long long>::max()
- + double( numeric_limits<long long>::max() ) );
- }
- };
-
- /** Adding an int and null. */
- class IntNull : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << 1 ); }
- BSONObj operand2() { return BSON( "" << BSONNULL ); }
- BSONObj expectedResult() { return BSON( "" << BSONNULL ); }
- };
-
- /** Adding a long and undefined. */
- class LongUndefined : public TwoOperandBase {
- BSONObj operand1() { return BSON( "" << 5LL ); }
- BSONObj operand2() { return fromjson( "{'':undefined}" ); }
- BSONObj expectedResult() { return BSON( "" << BSONNULL ); }
- };
-
- } // namespace Add
-
- namespace And {
-
- class ExpectedResultBase {
- public:
- virtual ~ExpectedResultBase() {
- }
- void run() {
- BSONObj specObject = BSON( "" << spec() );
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS( constify( spec() ), expressionToBson( expression ) );
- ASSERT_EQUALS( BSON( "" << expectedResult() ),
- toBson( expression->evaluate( fromBson( BSON( "a" << 1 ) ) ) ) );
- intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS( BSON( "" << expectedResult() ),
- toBson( optimized->evaluate( fromBson( BSON( "a" << 1 ) ) ) ) );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual bool expectedResult() = 0;
- };
-
- class OptimizeBase {
- public:
- virtual ~OptimizeBase() {
- }
- void run() {
- BSONObj specObject = BSON( "" << spec() );
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS( constify( spec() ), expressionToBson( expression ) );
- intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS( expectedOptimized(), expressionToBson( optimized ) );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual BSONObj expectedOptimized() = 0;
- };
-
- class NoOptimizeBase : public OptimizeBase {
- BSONObj expectedOptimized() { return constify( spec() ); }
- };
-
- /** $and without operands. */
- class NoOperands : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSONArray() ); }
- bool expectedResult() { return true; }
- };
-
- /** $and passed 'true'. */
- class True : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( true ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $and passed 'false'. */
- class False : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( false ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $and passed 'true', 'true'. */
- class TrueTrue : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( true << true ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $and passed 'true', 'false'. */
- class TrueFalse : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( true << false ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $and passed 'false', 'true'. */
- class FalseTrue : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( false << true ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $and passed 'false', 'false'. */
- class FalseFalse : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( false << false ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $and passed 'true', 'true', 'true'. */
- class TrueTrueTrue : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( true << true << true ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $and passed 'true', 'true', 'false'. */
- class TrueTrueFalse : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( true << true << false ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $and passed '0', '1'. */
- class ZeroOne : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( 0 << 1 ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $and passed '1', '2'. */
- class OneTwo : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( 1 << 2 ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $and passed a field path. */
- class FieldPath : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( "$a" ) ); }
- bool expectedResult() { return true; }
- };
-
- /** A constant expression is optimized to a constant. */
- class OptimizeConstantExpression : public OptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( 1 ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << true ); }
- };
-
- /** A non constant expression is not optimized. */
- class NonConstant : public NoOptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( "$a" ) ); }
- };
-
- /** An expression beginning with a single constant is optimized. */
- class ConstantNonConstantTrue : public OptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( 1 << "$a" ) ); }
- BSONObj expectedOptimized() { return BSON( "$and" << BSON_ARRAY( "$a" ) ); }
- // note: using $and as serialization of ExpressionCoerceToBool rather than ExpressionAnd
- };
-
- class ConstantNonConstantFalse : public OptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( 0 << "$a" ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << false ); }
- };
-
- /** An expression with a field path and '1'. */
- class NonConstantOne : public OptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( "$a" << 1 ) ); }
- BSONObj expectedOptimized() { return BSON( "$and" << BSON_ARRAY( "$a" ) ); }
- };
-
- /** An expression with a field path and '0'. */
- class NonConstantZero : public OptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( "$a" << 0 ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << false ); }
- };
-
- /** An expression with two field paths and '1'. */
- class NonConstantNonConstantOne : public OptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( "$a" << "$b" << 1 ) ); }
- BSONObj expectedOptimized() { return BSON( "$and" << BSON_ARRAY( "$a" << "$b" ) ); }
- };
-
- /** An expression with two field paths and '0'. */
- class NonConstantNonConstantZero : public OptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( "$a" << "$b" << 0 ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << false ); }
- };
-
- /** An expression with '0', '1', and a field path. */
- class ZeroOneNonConstant : public OptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( 0 << 1 << "$a" ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << false ); }
- };
-
- /** An expression with '1', '1', and a field path. */
- class OneOneNonConstant : public OptimizeBase {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( 1 << 1 << "$a" ) ); }
- BSONObj expectedOptimized() { return BSON( "$and" << BSON_ARRAY( "$a" ) ); }
- };
-
- /** Nested $and expressions. */
- class Nested : public OptimizeBase {
- BSONObj spec() {
- return BSON( "$and" <<
- BSON_ARRAY( 1 << BSON( "$and" << BSON_ARRAY( 1 ) ) << "$a" << "$b" ) );
- }
- BSONObj expectedOptimized() { return BSON( "$and" << BSON_ARRAY( "$a" << "$b" ) ); }
- };
-
- /** Nested $and expressions containing a nested value evaluating to false. */
- class NestedZero : public OptimizeBase {
- BSONObj spec() {
- return BSON( "$and" <<
- BSON_ARRAY( 1 <<
- BSON( "$and" <<
- BSON_ARRAY( BSON( "$and" <<
- BSON_ARRAY( 0 ) ) ) ) <<
- "$a" << "$b" ) );
- }
- BSONObj expectedOptimized() { return BSON( "$const" << false ); }
- };
-
- } // namespace And
-
- namespace CoerceToBool {
-
- /** Nested expression coerced to true. */
- class EvaluateTrue {
- public:
- void run() {
- intrusive_ptr<Expression> nested =
- ExpressionConstant::create( Value( 5 ) );
- intrusive_ptr<Expression> expression = ExpressionCoerceToBool::create( nested );
- ASSERT( expression->evaluate( Document() ).getBool() );
- }
- };
-
- /** Nested expression coerced to false. */
- class EvaluateFalse {
- public:
- void run() {
- intrusive_ptr<Expression> nested =
- ExpressionConstant::create( Value( 0 ) );
- intrusive_ptr<Expression> expression = ExpressionCoerceToBool::create( nested );
- ASSERT( !expression->evaluate( Document() ).getBool() );
- }
- };
-
- /** Dependencies forwarded from nested expression. */
- class Dependencies {
- public:
- void run() {
- intrusive_ptr<Expression> nested = ExpressionFieldPath::create( "a.b" );
- intrusive_ptr<Expression> expression = ExpressionCoerceToBool::create( nested );
- DepsTracker dependencies;
- expression->addDependencies( &dependencies );
- ASSERT_EQUALS( 1U, dependencies.fields.size() );
- ASSERT_EQUALS( 1U, dependencies.fields.count( "a.b" ) );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( false, dependencies.needTextScore );
- }
- };
+/** Single Date argument. */
+class Date : public SingleOperandBase {
+ BSONObj operand() {
+ return BSON("" << Date_t(12345));
+ }
+};
- /** Output to BSONObj. */
- class AddToBsonObj {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionCoerceToBool::create(
- ExpressionFieldPath::create("foo"));
+/** Single null argument. */
+class Null : public SingleOperandBase {
+ BSONObj operand() {
+ return BSON("" << BSONNULL);
+ }
+ BSONObj expectedResult() {
+ return BSON("" << BSONNULL);
+ }
+};
- // serialized as $and because CoerceToBool isn't an ExpressionNary
- assertBinaryEqual(fromjson("{field:{$and:['$foo']}}"), toBsonObj(expression));
- }
- private:
- static BSONObj toBsonObj(const intrusive_ptr<Expression>& expression) {
- return BSON("field" << expression->serialize(false));
- }
- };
+/** Single undefined argument. */
+class Undefined : public SingleOperandBase {
+ BSONObj operand() {
+ return fromjson("{'':undefined}");
+ }
+ BSONObj expectedResult() {
+ return BSON("" << BSONNULL);
+ }
+};
+
+class TwoOperandBase : public ExpectedResultBase {
+public:
+ TwoOperandBase() : _reverse() {}
+ void run() {
+ ExpectedResultBase::run();
+ // Now add the operands in the reverse direction.
+ _reverse = true;
+ ExpectedResultBase::run();
+ }
- /** Output to BSONArray. */
- class AddToBsonArray {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionCoerceToBool::create(
- ExpressionFieldPath::create("foo"));
+protected:
+ void populateOperands(intrusive_ptr<ExpressionNary>& expression) {
+ expression->addOperand(
+ ExpressionConstant::create(valueFromBson(_reverse ? operand2() : operand1())));
+ expression->addOperand(
+ ExpressionConstant::create(valueFromBson(_reverse ? operand1() : operand2())));
+ }
+ virtual BSONObj operand1() = 0;
+ virtual BSONObj operand2() = 0;
- // serialized as $and because CoerceToBool isn't an ExpressionNary
- assertBinaryEqual(BSON_ARRAY(fromjson("{$and:['$foo']}")), toBsonArray(expression));
- }
- private:
- static BSONArray toBsonArray(const intrusive_ptr<Expression>& expression) {
- BSONArrayBuilder bab;
- bab << expression->serialize(false);
- return bab.arr();
- }
- };
+private:
+ bool _reverse;
+};
+/** Add two ints. */
+class IntInt : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << 1);
+ }
+ BSONObj operand2() {
+ return BSON("" << 5);
+ }
+ BSONObj expectedResult() {
+ return BSON("" << 6);
+ }
+};
- // TODO Test optimize(), difficult because a CoerceToBool cannot be output as BSON.
-
- } // namespace CoerceToBool
+/** Adding two large ints produces a long, not an overflowed int. */
+class IntIntNoOverflow : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << numeric_limits<int>::max());
+ }
+ BSONObj operand2() {
+ return BSON("" << numeric_limits<int>::max());
+ }
+ BSONObj expectedResult() {
+ return BSON("" << ((long long)(numeric_limits<int>::max()) + numeric_limits<int>::max()));
+ }
+};
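+// Editor's note (illustrative, not in the original source): at the
+// aggregation level this corresponds to something like
+//     {$add: [2147483647, 2147483647]}  =>  NumberLong(4294967294)
+// i.e. the sum is widened to a 64-bit long rather than wrapped in 32 bits.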
- namespace Compare {
+/** Adding an int and a long produces a long. */
+class IntLong : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << 1);
+ }
+ BSONObj operand2() {
+ return BSON("" << 9LL);
+ }
+ BSONObj expectedResult() {
+ return BSON("" << 10LL);
+ }
+};
- class OptimizeBase {
- public:
- virtual ~OptimizeBase() {
- }
- void run() {
- BSONObj specObject = BSON( "" << spec() );
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS( constify( expectedOptimized() ), expressionToBson( optimized ) );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual BSONObj expectedOptimized() = 0;
- };
-
- class FieldRangeOptimize : public OptimizeBase {
- BSONObj expectedOptimized() { return spec(); }
- };
-
- class NoOptimize : public OptimizeBase {
- BSONObj expectedOptimized() { return spec(); }
- };
-
- /** Check expected result for expressions depending on constants. */
- class ExpectedResultBase : public OptimizeBase {
- public:
- void run() {
- OptimizeBase::run();
- BSONObj specObject = BSON( "" << spec() );
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- // Check expression spec round trip.
- ASSERT_EQUALS( constify( spec() ), expressionToBson( expression ) );
- // Check evaluation result.
- ASSERT_EQUALS( expectedResult(),
- toBson( expression->evaluate( Document() ) ) );
- // Check that the result is the same after optimizing.
- intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS( expectedResult(),
- toBson( optimized->evaluate( Document() ) ) );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual BSONObj expectedResult() = 0;
- private:
- virtual BSONObj expectedOptimized() {
- return BSON( "$const" << expectedResult().firstElement() );
- }
- };
-
- class ExpectedTrue : public ExpectedResultBase {
- BSONObj expectedResult() { return BSON( "" << true ); }
- };
-
- class ExpectedFalse : public ExpectedResultBase {
- BSONObj expectedResult() { return BSON( "" << false ); }
- };
-
- class ParseError {
- public:
- virtual ~ParseError() {
- }
- void run() {
- BSONObj specObject = BSON( "" << spec() );
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- ASSERT_THROWS( Expression::parseOperand(specElement, vps), UserException );
- }
- protected:
- virtual BSONObj spec() = 0;
- };
-
- /** $eq with first < second. */
- class EqLt : public ExpectedFalse {
- BSONObj spec() { return BSON( "$eq" << BSON_ARRAY( 1 << 2 ) ); }
- };
-
- /** $eq with first == second. */
- class EqEq : public ExpectedTrue {
- BSONObj spec() { return BSON( "$eq" << BSON_ARRAY( 1 << 1 ) ); }
- };
-
- /** $eq with first > second. */
- class EqGt : public ExpectedFalse {
- BSONObj spec() { return BSON( "$eq" << BSON_ARRAY( 1 << 0 ) ); }
- };
-
- /** $ne with first < second. */
- class NeLt : public ExpectedTrue {
- BSONObj spec() { return BSON( "$ne" << BSON_ARRAY( 1 << 2 ) ); }
- };
-
- /** $ne with first == second. */
- class NeEq : public ExpectedFalse {
- BSONObj spec() { return BSON( "$ne" << BSON_ARRAY( 1 << 1 ) ); }
- };
-
- /** $ne with first > second. */
- class NeGt : public ExpectedTrue {
- BSONObj spec() { return BSON( "$ne" << BSON_ARRAY( 1 << 0 ) ); }
- };
-
- /** $gt with first < second. */
- class GtLt : public ExpectedFalse {
- BSONObj spec() { return BSON( "$gt" << BSON_ARRAY( 1 << 2 ) ); }
- };
-
- /** $gt with first == second. */
- class GtEq : public ExpectedFalse {
- BSONObj spec() { return BSON( "$gt" << BSON_ARRAY( 1 << 1 ) ); }
- };
-
- /** $gt with first > second. */
- class GtGt : public ExpectedTrue {
- BSONObj spec() { return BSON( "$gt" << BSON_ARRAY( 1 << 0 ) ); }
- };
-
- /** $gte with first < second. */
- class GteLt : public ExpectedFalse {
- BSONObj spec() { return BSON( "$gte" << BSON_ARRAY( 1 << 2 ) ); }
- };
-
- /** $gte with first == second. */
- class GteEq : public ExpectedTrue {
- BSONObj spec() { return BSON( "$gte" << BSON_ARRAY( 1 << 1 ) ); }
- };
-
- /** $gte with first > second. */
- class GteGt : public ExpectedTrue {
- BSONObj spec() { return BSON( "$gte" << BSON_ARRAY( 1 << 0 ) ); }
- };
-
- /** $lt with first < second. */
- class LtLt : public ExpectedTrue {
- BSONObj spec() { return BSON( "$lt" << BSON_ARRAY( 1 << 2 ) ); }
- };
-
- /** $lt with first == second. */
- class LtEq : public ExpectedFalse {
- BSONObj spec() { return BSON( "$lt" << BSON_ARRAY( 1 << 1 ) ); }
- };
-
- /** $lt with first > second. */
- class LtGt : public ExpectedFalse {
- BSONObj spec() { return BSON( "$lt" << BSON_ARRAY( 1 << 0 ) ); }
- };
-
- /** $lte with first < second. */
- class LteLt : public ExpectedTrue {
- BSONObj spec() { return BSON( "$lte" << BSON_ARRAY( 1 << 2 ) ); }
- };
-
- /** $lte with first == second. */
- class LteEq : public ExpectedTrue {
- BSONObj spec() { return BSON( "$lte" << BSON_ARRAY( 1 << 1 ) ); }
- };
-
- /** $lte with first > second. */
- class LteGt : public ExpectedFalse {
- BSONObj spec() { return BSON( "$lte" << BSON_ARRAY( 1 << 0 ) ); }
- };
-
- /** $cmp with first < second. */
- class CmpLt : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$cmp" << BSON_ARRAY( 1 << 2 ) ); }
- BSONObj expectedResult() { return BSON( "" << -1 ); }
- };
-
- /** $cmp with first == second. */
- class CmpEq : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$cmp" << BSON_ARRAY( 1 << 1 ) ); }
- BSONObj expectedResult() { return BSON( "" << 0 ); }
- };
-
- /** $cmp with first > second. */
- class CmpGt : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$cmp" << BSON_ARRAY( 1 << 0 ) ); }
- BSONObj expectedResult() { return BSON( "" << 1 ); }
- };
-
- /** $cmp results are bracketed to an absolute value of 1. */
- class CmpBracketed : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$cmp" << BSON_ARRAY( "z" << "a" ) ); }
- BSONObj expectedResult() { return BSON( "" << 1 ); }
- };
-
- /** Zero operands provided. */
- class ZeroOperands : public ParseError {
- BSONObj spec() { return BSON( "$ne" << BSONArray() ); }
- };
-
- /** One operand provided. */
- class OneOperand : public ParseError {
- BSONObj spec() { return BSON( "$eq" << BSON_ARRAY( 1 ) ); }
- };
-
- /** Three operands provided. */
- class ThreeOperands : public ParseError {
- BSONObj spec() { return BSON( "$gt" << BSON_ARRAY( 2 << 3 << 4 ) ); }
- };
-
- /** Incompatible types can be compared. */
- class IncompatibleTypes {
- public:
- void run() {
- BSONObj specObject = BSON( "" << BSON( "$ne" << BSON_ARRAY( "a" << 1 ) ) );
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS(expression->evaluate(Document()), Value(true));
- }
- };
-
- /**
- * An expression depending on constants is optimized to a constant via
- * ExpressionNary::optimize().
- */
- class OptimizeConstants : public OptimizeBase {
- BSONObj spec() { return BSON( "$eq" << BSON_ARRAY( 1 << 1 ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << true ); }
- };
-
- /** $cmp is not optimized. */
- class NoOptimizeCmp : public NoOptimize {
- BSONObj spec() { return BSON( "$cmp" << BSON_ARRAY( 1 << "$a" ) ); }
- };
-
- /** $ne is not optimized. */
- class NoOptimizeNe : public NoOptimize {
- BSONObj spec() { return BSON( "$ne" << BSON_ARRAY( 1 << "$a" ) ); }
- };
-
- /** No optimization is performed without a constant. */
- class NoOptimizeNoConstant : public NoOptimize {
- BSONObj spec() { return BSON( "$ne" << BSON_ARRAY( "$a" << "$b" ) ); }
- };
-
- /** No optimization is performed without an immediate field path. */
- class NoOptimizeWithoutFieldPath : public NoOptimize {
- BSONObj spec() {
- return BSON( "$eq" << BSON_ARRAY( BSON( "$and" << BSON_ARRAY( "$a" ) ) << 1 ) );
- }
- };
-
- /** No optimization is performed without an immediate field path. */
- class NoOptimizeWithoutFieldPathReverse : public NoOptimize {
- BSONObj spec() {
- return BSON( "$eq" << BSON_ARRAY( 1 << BSON( "$and" << BSON_ARRAY( "$a" ) ) ) );
- }
- };
-
- /** An equality expression is optimized. */
- class OptimizeEq : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$eq" << BSON_ARRAY( "$a" << 1 ) ); }
- };
-
- /** A reverse sense equality expression is optimized. */
- class OptimizeEqReverse : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$eq" << BSON_ARRAY( 1 << "$a" ) ); }
- };
-
- /** A $lt expression is optimized. */
- class OptimizeLt : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$lt" << BSON_ARRAY( "$a" << 1 ) ); }
- };
-
- /** A reverse sense $lt expression is optimized. */
- class OptimizeLtReverse : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$lt" << BSON_ARRAY( 1 << "$a" ) ); }
- };
-
- /** A $lte expression is optimized. */
- class OptimizeLte : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$lte" << BSON_ARRAY( "$b" << 2 ) ); }
- };
-
- /** A reverse sense $lte expression is optimized. */
- class OptimizeLteReverse : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$lte" << BSON_ARRAY( 2 << "$b" ) ); }
- };
-
- /** A $gt expression is optimized. */
- class OptimizeGt : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$gt" << BSON_ARRAY( "$b" << 2 ) ); }
- };
-
- /** A reverse sense $gt expression is optimized. */
- class OptimizeGtReverse : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$gt" << BSON_ARRAY( 2 << "$b" ) ); }
- };
-
- /** A $gte expression is optimized. */
- class OptimizeGte : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$gte" << BSON_ARRAY( "$b" << 2 ) ); }
- };
-
- /** A reverse sense $gte expression is optimized. */
- class OptimizeGteReverse : public FieldRangeOptimize {
- BSONObj spec() { return BSON( "$gte" << BSON_ARRAY( 2 << "$b" ) ); }
- };
-
- } // namespace Compare
-
- namespace Constant {
-
- /** Create an ExpressionConstant from a Value. */
- class Create {
- public:
- void run() {
- intrusive_ptr<Expression> expression =
- ExpressionConstant::create( Value( 5 ) );
- assertBinaryEqual( BSON( "" << 5 ),
- toBson( expression->evaluate( Document() ) ) );
- }
- };
-
- /** Create an ExpressionConstant from a BSONElement. */
- class CreateFromBsonElement {
- public:
- void run() {
- BSONObj spec = BSON( "IGNORED_FIELD_NAME" << "foo" );
- BSONElement specElement = spec.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression =
- ExpressionConstant::parse( specElement, vps );
- assertBinaryEqual( BSON( "" << "foo" ),
- toBson( expression->evaluate( Document() ) ) );
- }
- };
-
- /** No optimization is performed. */
- class Optimize {
- public:
- void run() {
- intrusive_ptr<Expression> expression =
- ExpressionConstant::create( Value( 5 ) );
- // An attempt to optimize returns the Expression itself.
- ASSERT_EQUALS( expression, expression->optimize() );
- }
- };
-
- /** No dependencies. */
- class Dependencies {
- public:
- void run() {
- intrusive_ptr<Expression> expression =
- ExpressionConstant::create( Value( 5 ) );
- DepsTracker dependencies;
- expression->addDependencies( &dependencies );
- ASSERT_EQUALS( 0U, dependencies.fields.size() );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( false, dependencies.needTextScore );
- }
- };
-
- /** Output to BSONObj. */
- class AddToBsonObj {
- public:
- void run() {
- intrusive_ptr<Expression> expression =
- ExpressionConstant::create( Value( 5 ) );
- // The constant is replaced with a $ expression.
- assertBinaryEqual( BSON( "field" << BSON( "$const" << 5 ) ),
- toBsonObj( expression ) );
- }
- private:
- static BSONObj toBsonObj( const intrusive_ptr<Expression>& expression ) {
- return BSON("field" << expression->serialize(false));
- }
- };
-
- /** Output to BSONArray. */
- class AddToBsonArray {
- public:
- void run() {
- intrusive_ptr<Expression> expression =
- ExpressionConstant::create( Value( 5 ) );
- // The constant is copied out as is.
- assertBinaryEqual( constify( BSON_ARRAY( 5 ) ), toBsonArray( expression ) );
- }
- private:
- static BSONObj toBsonArray( const intrusive_ptr<Expression>& expression ) {
- BSONArrayBuilder bab;
- bab << expression->serialize(false);
- return bab.obj();
- }
- };
-
- } // namespace Constant
-
- namespace FieldPath {
-
- /** The provided field path does not pass validation. */
- class Invalid {
- public:
- void run() {
- ASSERT_THROWS( ExpressionFieldPath::create( "" ), UserException );
- }
- };
-
- /** No optimization is performed. */
- class Optimize {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a" );
- // An attempt to optimize returns the Expression itself.
- ASSERT_EQUALS( expression, expression->optimize() );
- }
- };
-
- /** The field path itself is a dependency. */
- class Dependencies {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- DepsTracker dependencies;
- expression->addDependencies( &dependencies );
- ASSERT_EQUALS( 1U, dependencies.fields.size() );
- ASSERT_EQUALS( 1U, dependencies.fields.count( "a.b" ) );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( false, dependencies.needTextScore );
- }
- };
-
- /** Field path target field is missing. */
- class Missing {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a" );
- assertBinaryEqual( fromjson( "{}" ),
- toBson( expression->evaluate( Document() ) ) );
- }
- };
-
- /** Simple case where the target field is present. */
- class Present {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a" );
- assertBinaryEqual( fromjson( "{'':123}" ),
- toBson( expression->evaluate
- ( fromBson( BSON( "a" << 123 ) ) ) ) );
- }
- };
-
- /** Target field parent is null. */
- class NestedBelowNull {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{}" ),
- toBson( expression->evaluate
- ( fromBson( fromjson( "{a:null}" ) ) ) ) );
- }
- };
-
- /** Target field parent is undefined. */
- class NestedBelowUndefined {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{}" ),
- toBson( expression->evaluate
- ( fromBson( fromjson( "{a:undefined}" ) ) ) ) );
- }
- };
-
- /** Target field parent is missing. */
- class NestedBelowMissing {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{}" ),
- toBson( expression->evaluate
- ( fromBson( fromjson( "{z:1}" ) ) ) ) );
- }
- };
-
- /** Target field parent is an integer. */
- class NestedBelowInt {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{}" ),
- toBson( expression->evaluate
- ( fromBson( BSON( "a" << 2 ) ) ) ) );
- }
- };
-
- /** A value in a nested object. */
- class NestedValue {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( BSON( "" << 55 ),
- toBson( expression->evaluate
- ( fromBson( BSON( "a" << BSON( "b" << 55 ) ) ) ) ) );
- }
- };
-
- /** Target field within an empty object. */
- class NestedBelowEmptyObject {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{}" ),
- toBson( expression->evaluate
- ( fromBson( BSON( "a" << BSONObj() ) ) ) ) );
- }
- };
-
- /** Target field within an empty array. */
- class NestedBelowEmptyArray {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( BSON( "" << BSONArray() ),
- toBson( expression->evaluate
- ( fromBson( BSON( "a" << BSONArray() ) ) ) ) );
- }
- };
-
- /** Target field within an array containing null. */
- class NestedBelowArrayWithNull {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{'':[]}" ),
- toBson( expression->evaluate
- ( fromBson( fromjson( "{a:[null]}" ) ) ) ) );
- }
- };
-
- /** Target field within an array containing undefined. */
- class NestedBelowArrayWithUndefined {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{'':[]}" ),
- toBson( expression->evaluate
- ( fromBson( fromjson( "{a:[undefined]}" ) ) ) ) );
- }
- };
-
- /** Target field within an array containing an integer. */
- class NestedBelowArrayWithInt {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{'':[]}" ),
- toBson( expression->evaluate
- ( fromBson( fromjson( "{a:[1]}" ) ) ) ) );
- }
- };
-
- /** Target field within an array. */
- class NestedWithinArray {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{'':[9]}" ),
- toBson( expression->evaluate
- ( fromBson( fromjson( "{a:[{b:9}]}" ) ) ) ) );
- }
- };
-
- /** Multiple value types within an array. */
- class MultipleArrayValues {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b" );
- assertBinaryEqual( fromjson( "{'':[9,20]}" ),
- toBson( expression->evaluate
- ( fromBson( fromjson
- ( "{a:[{b:9},null,undefined,{g:4},{b:20},{}]}"
- ) ) ) ) );
- }
- };
-
- /** Expanding values within nested arrays. */
- class ExpandNestedArrays {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b.c" );
- assertBinaryEqual( fromjson( "{'':[[1,2],3,[4],[[5]],[6,7]]}" ),
- toBson
- ( expression->evaluate
- ( fromBson
- ( fromjson( "{a:[{b:[{c:1},{c:2}]},"
- "{b:{c:3}},"
- "{b:[{c:4}]},"
- "{b:[{c:[5]}]},"
- "{b:{c:[6,7]}}]}" ) ) ) ) );
- }
- };
-
- /** Add to a BSONObj. */
- class AddToBsonObj {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b.c" );
- assertBinaryEqual(BSON("foo" << "$a.b.c"),
- BSON("foo" << expression->serialize(false)));
- }
- };
-
- /** Add to a BSONArray. */
- class AddToBsonArray {
- public:
- void run() {
- intrusive_ptr<Expression> expression = ExpressionFieldPath::create( "a.b.c" );
- BSONArrayBuilder bab;
- bab << expression->serialize(false);
- assertBinaryEqual( BSON_ARRAY( "$a.b.c" ), bab.arr() );
- }
- };
-
- } // namespace FieldPath
-
-
- namespace Nary {
-
- /** A dummy child of ExpressionNary used for testing. */
- class Testable : public ExpressionNary {
- public:
- virtual Value evaluateInternal(Variables* vars) const {
- // Just put all the values in a list. This is not associative/commutative so
- // the results will change if a factory is provided and operations are reordered.
- vector<Value> values;
- for( ExpressionVector::const_iterator i = vpOperand.begin(); i != vpOperand.end();
- ++i ) {
- values.push_back( (*i)->evaluateInternal(vars) );
- }
- return Value( values );
- }
- virtual const char* getOpName() const { return "$testable"; }
- virtual bool isAssociativeAndCommutative() const {
- return _isAssociativeAndCommutative;
- }
- static intrusive_ptr<Testable> create( bool associativeAndCommutative = false ) {
- return new Testable(associativeAndCommutative);
- }
- static intrusive_ptr<ExpressionNary> factory() {
- return new Testable(true);
- }
- static intrusive_ptr<Testable> createFromOperands( const BSONArray& operands,
- bool haveFactory = false ) {
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Testable> testable = create( haveFactory );
- BSONObjIterator i( operands );
- while( i.more() ) {
- BSONElement element = i.next();
- testable->addOperand( Expression::parseOperand(element, vps) );
- }
- return testable;
- }
- void assertContents( const BSONArray& expectedContents ) {
- ASSERT_EQUALS( constify( BSON( "$testable" << expectedContents ) ), expressionToBson( this ) );
- }
+/** Adding an int and a long overflows. */
+class IntLongOverflow : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << numeric_limits<int>::max());
+ }
+ BSONObj operand2() {
+ return BSON("" << numeric_limits<long long>::max());
+ }
+ BSONObj expectedResult() {
+ return BSON("" << (numeric_limits<int>::max() + numeric_limits<long long>::max()));
+ }
+};
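+// Editor's note: the expected value is itself computed as
+// INT_MAX + LLONG_MAX, so the assertion pins the wrapped 64-bit sum --
+// there is no wider integer type for $add to fall back on here.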
- private:
- Testable(bool isAssociativeAndCommutative)
- : _isAssociativeAndCommutative(isAssociativeAndCommutative)
- {}
- bool _isAssociativeAndCommutative;
- };
-
- /** Adding operands to the expression. */
- class AddOperand {
- public:
- void run() {
- intrusive_ptr<Testable> testable = Testable::create();
- testable->addOperand( ExpressionConstant::create( Value( 9 ) ) );
- testable->assertContents( BSON_ARRAY( 9 ) );
- testable->addOperand( ExpressionFieldPath::create( "ab.c" ) );
- testable->assertContents( BSON_ARRAY( 9 << "$ab.c" ) );
- }
- };
+/** Adding an int and a double produces a double. */
+class IntDouble : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << 9);
+ }
+ BSONObj operand2() {
+ return BSON("" << 1.1);
+ }
+ BSONObj expectedResult() {
+ return BSON("" << 10.1);
+ }
+};
- /** Dependencies of the expression. */
- class Dependencies {
- public:
- void run() {
- intrusive_ptr<Testable> testable = Testable::create();
+/** Adding an int and a Date produces a Date. */
+class IntDate : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << 6);
+ }
+ BSONObj operand2() {
+ return BSON("" << Date_t(123450));
+ }
+ BSONObj expectedResult() {
+ return BSON("" << Date_t(123456));
+ }
+};
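+// Editor's note (illustrative): the numeric operand is interpreted as
+// milliseconds, which is why adding 6 to Date_t(123450) yields Date_t(123456).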
- // No arguments.
- assertDependencies( BSONArray(), testable );
+/** Adding a long and a double produces a double. */
+class LongDouble : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << 9LL);
+ }
+ BSONObj operand2() {
+ return BSON("" << 1.1);
+ }
+ BSONObj expectedResult() {
+ return BSON("" << 10.1);
+ }
+};
- // Add a constant argument.
- testable->addOperand( ExpressionConstant::create( Value( 1 ) ) );
- assertDependencies( BSONArray(), testable );
+/** Adding a long and a double does not overflow. */
+class LongDoubleNoOverflow : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << numeric_limits<long long>::max());
+ }
+ BSONObj operand2() {
+ return BSON("" << double(numeric_limits<long long>::max()));
+ }
+ BSONObj expectedResult() {
+ return BSON("" << numeric_limits<long long>::max() +
+ double(numeric_limits<long long>::max()));
+ }
+};
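+// Editor's note: once a double operand is involved the addition is performed
+// in double precision; LLONG_MAX + double(LLONG_MAX) is about 1.8e19, far
+// below the maximum double, so no overflow can occur.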
- // Add a field path argument.
- testable->addOperand( ExpressionFieldPath::create( "ab.c" ) );
- assertDependencies( BSON_ARRAY( "ab.c" ), testable );
+/** Adding an int and null. */
+class IntNull : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << 1);
+ }
+ BSONObj operand2() {
+ return BSON("" << BSONNULL);
+ }
+ BSONObj expectedResult() {
+ return BSON("" << BSONNULL);
+ }
+};
- // Add an object expression.
- BSONObj spec = BSON( "" << BSON( "a" << "$x" << "q" << "$r" ) );
- BSONElement specElement = spec.firstElement();
- Expression::ObjectCtx ctx( Expression::ObjectCtx::DOCUMENT_OK );
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- testable->addOperand( Expression::parseObject(specElement.Obj(), &ctx, vps) );
- assertDependencies( BSON_ARRAY( "ab.c" << "r" << "x" ), testable );
- }
- private:
- void assertDependencies( const BSONArray& expectedDependencies,
- const intrusive_ptr<Expression>& expression ) {
- DepsTracker dependencies;
- expression->addDependencies( &dependencies );
- BSONArrayBuilder dependenciesBson;
- for( set<string>::const_iterator i = dependencies.fields.begin();
- i != dependencies.fields.end();
- ++i ) {
- dependenciesBson << *i;
- }
- ASSERT_EQUALS( expectedDependencies, dependenciesBson.arr() );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( false, dependencies.needTextScore );
- }
- };
-
- /** Serialize to an object. */
- class AddToBsonObj {
- public:
- void run() {
- intrusive_ptr<Testable> testable = Testable::create();
- testable->addOperand( ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_EQUALS(BSON("foo" << BSON("$testable" << BSON_ARRAY(BSON("$const" << 5)))),
- BSON("foo" << testable->serialize(false)));
- }
- };
-
- /** Serialize to an array. */
- class AddToBsonArray {
- public:
- void run() {
- intrusive_ptr<Testable> testable = Testable::create();
- testable->addOperand( ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_EQUALS(constify(BSON_ARRAY(BSON("$testable" << BSON_ARRAY(5)))),
- BSON_ARRAY(testable->serialize(false)));
- }
- };
-
- /** One operand is optimized to a constant, while another is left as is. */
- class OptimizeOneOperand {
- public:
- void run() {
- BSONArray spec = BSON_ARRAY( BSON( "$and" << BSONArray() ) << "$abc" );
- intrusive_ptr<Testable> testable = Testable::createFromOperands( spec );
- testable->assertContents( spec );
- ASSERT( testable == testable->optimize() );
- testable->assertContents( BSON_ARRAY( true << "$abc" ) );
- }
- };
-
- /** All operands are constants, and the operator is evaluated with them. */
- class EvaluateAllConstantOperands {
- public:
- void run() {
- BSONArray spec = BSON_ARRAY( 1 << 2 );
- intrusive_ptr<Testable> testable = Testable::createFromOperands( spec );
- testable->assertContents( spec );
- intrusive_ptr<Expression> optimized = testable->optimize();
- ASSERT( testable != optimized );
- ASSERT_EQUALS( BSON( "$const" << BSON_ARRAY( 1 << 2 ) ),
- expressionToBson( optimized ) );
- }
- };
+/** Adding a long and undefined. */
+class LongUndefined : public TwoOperandBase {
+ BSONObj operand1() {
+ return BSON("" << 5LL);
+ }
+ BSONObj operand2() {
+ return fromjson("{'':undefined}");
+ }
+ BSONObj expectedResult() {
+ return BSON("" << BSONNULL);
+ }
+};
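+// Editor's note: taken together, IntNull and LongUndefined show that a null
+// or undefined operand makes the whole $add evaluate to null, with undefined
+// normalized to null rather than propagated as-is.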
+
+} // namespace Add
+
+namespace And {
+
+class ExpectedResultBase {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ BSONObj specObject = BSON("" << spec());
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ ASSERT_EQUALS(BSON("" << expectedResult()),
+ toBson(expression->evaluate(fromBson(BSON("a" << 1)))));
+ intrusive_ptr<Expression> optimized = expression->optimize();
+ ASSERT_EQUALS(BSON("" << expectedResult()),
+ toBson(optimized->evaluate(fromBson(BSON("a" << 1)))));
+ }
- class NoFactoryOptimizeBase {
- public:
- virtual ~NoFactoryOptimizeBase() {
- }
- void run() {
- intrusive_ptr<Testable> testable = createTestable();
- // Without factory optimization, optimization will not produce a new expression.
- ASSERT( testable == testable->optimize() );
- }
- protected:
- virtual intrusive_ptr<Testable> createTestable() = 0;
- };
-
- /** A string constant prevents factory optimization. */
- class StringConstant : public NoFactoryOptimizeBase {
- intrusive_ptr<Testable> createTestable() {
- return Testable::createFromOperands( BSON_ARRAY( "abc" << "def" << "$path" ),
- true );
- }
- };
+protected:
+ virtual BSONObj spec() = 0;
+ virtual bool expectedResult() = 0;
+};
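+// Editor's note: a subclass only supplies spec() and expectedResult(); run()
+// verifies the spec round-trips through the parser and evaluates to the
+// expected boolean both before and after optimize(). The input document is
+// {a: 1}, so a "$a" field path is truthy. A minimal sketch of a concrete
+// case (mirroring the TrueTrue test):
+//
+//     class TrueAndTrue : public ExpectedResultBase {
+//         BSONObj spec() { return BSON("$and" << BSON_ARRAY(true << true)); }
+//         bool expectedResult() { return true; }
+//     };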
+
+class OptimizeBase {
+public:
+ virtual ~OptimizeBase() {}
+ void run() {
+ BSONObj specObject = BSON("" << spec());
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ intrusive_ptr<Expression> optimized = expression->optimize();
+ ASSERT_EQUALS(expectedOptimized(), expressionToBson(optimized));
+ }
- /** A single (instead of multiple) constant prevents optimization. SERVER-6192 */
- class SingleConstant : public NoFactoryOptimizeBase {
- intrusive_ptr<Testable> createTestable() {
- return Testable::createFromOperands( BSON_ARRAY( 55 << "$path" ), true );
- }
- };
+protected:
+ virtual BSONObj spec() = 0;
+ virtual BSONObj expectedOptimized() = 0;
+};
- /** Factory optimization is not used without a factory. */
- class NoFactory : public NoFactoryOptimizeBase {
- intrusive_ptr<Testable> createTestable() {
- return Testable::createFromOperands( BSON_ARRAY( 55 << 66 << "$path" ), false );
- }
- };
-
- /** Factory optimization separates constant from non constant expressions. */
- class FactoryOptimize {
- public:
- void run() {
- intrusive_ptr<Testable> testable =
- Testable::createFromOperands( BSON_ARRAY( 55 << 66 << "$path" ), true );
- intrusive_ptr<Expression> optimized = testable->optimize();
- // The constant expressions are evaluated separately and placed at the end.
- ASSERT_EQUALS( constify( BSON( "$testable"
- << BSON_ARRAY( "$path" << BSON_ARRAY( 55 << 66 ) ) ) ),
- expressionToBson( optimized ) );
- }
- };
-
- /** Factory optimization flattens nested operators of the same type. */
- class FlattenOptimize {
- public:
- void run() {
- intrusive_ptr<Testable> testable =
- Testable::createFromOperands
- ( BSON_ARRAY( 55 << "$path" <<
- // $add has a factory, but it's a different factory from
- // $testable.
- BSON( "$add" << BSON_ARRAY( 5 << 6 << "$q" ) ) <<
- 66 ),
- true );
- // Add a nested $testable operand.
- testable->addOperand
- ( Testable::createFromOperands
- ( BSON_ARRAY( 99 << 100 << "$another_path" ), true ) );
- intrusive_ptr<Expression> optimized = testable->optimize();
- ASSERT_EQUALS
- ( constify( BSON( "$testable" <<
- BSON_ARRAY( // non constant parts
- "$path" <<
- BSON( "$add" << BSON_ARRAY( "$q" << 11 ) ) <<
- "$another_path" <<
- // constant part last
- BSON_ARRAY( 55 << 66 << BSON_ARRAY( 99 << 100 ) ) ) ) ),
- expressionToBson( optimized ) );
- }
- };
-
- /** Three layers of factory optimization are flattened. */
- class FlattenThreeLayers {
- public:
- void run() {
- intrusive_ptr<Testable> top =
- Testable::createFromOperands( BSON_ARRAY( 1 << 2 << "$a" ), true );
- intrusive_ptr<Testable> nested =
- Testable::createFromOperands( BSON_ARRAY( 3 << 4 << "$b" ), true );
- nested->addOperand
- ( Testable::createFromOperands( BSON_ARRAY( 5 << 6 << "$c" ), true ) );
- top->addOperand( nested );
- intrusive_ptr<Expression> optimized = top->optimize();
- ASSERT_EQUALS
- ( constify( BSON( "$testable" <<
- BSON_ARRAY( "$a" << "$b" << "$c" <<
- BSON_ARRAY( 1 << 2 <<
- BSON_ARRAY( 3 << 4 <<
- BSON_ARRAY( 5 << 6 ) ) ) ) ) ),
- expressionToBson( optimized ) );
- }
- };
-
- } // namespace Nary
-
- namespace Object {
-
- class Base {
- protected:
- void assertDependencies( const BSONArray& expectedDependencies,
- const intrusive_ptr<ExpressionObject>& expression,
- bool includePath = true ) const {
- vector<string> path;
- DepsTracker dependencies;
- expression->addDependencies( &dependencies, includePath ? &path : 0 );
- BSONArrayBuilder bab;
- for( set<string>::const_iterator i = dependencies.fields.begin();
- i != dependencies.fields.end();
- ++i ) {
- bab << *i;
- }
- ASSERT_EQUALS( expectedDependencies, bab.arr() );
- ASSERT_EQUALS( false, dependencies.needWholeDocument );
- ASSERT_EQUALS( false, dependencies.needTextScore );
- }
- };
-
- class ExpectedResultBase : public Base {
- public:
- virtual ~ExpectedResultBase() {
- }
- void run() {
- _expression = ExpressionObject::createRoot();
- prepareExpression();
- Document document = fromBson( source() );
- MutableDocument result;
- Variables vars(0, document);
- expression()->addToDocument( result, document, &vars );
- assertBinaryEqual( expected(), toBson( result.freeze() ) );
- assertDependencies( expectedDependencies(), _expression );
- ASSERT_EQUALS( expectedBsonRepresentation(), expressionToBson( _expression ) );
- ASSERT_EQUALS( expectedIsSimple(), _expression->isSimple() );
- }
- protected:
- intrusive_ptr<ExpressionObject> expression() { return _expression; }
- virtual BSONObj source() { return BSON( "_id" << 0 << "a" << 1 << "b" << 2 ); }
- virtual void prepareExpression() = 0;
- virtual BSONObj expected() = 0;
- virtual BSONArray expectedDependencies() = 0;
- virtual BSONObj expectedBsonRepresentation() = 0;
- virtual bool expectedIsSimple() { return true; }
- private:
- intrusive_ptr<ExpressionObject> _expression;
- };
-
- /** Empty object spec. */
- class Empty : public ExpectedResultBase {
- public:
- void prepareExpression() {}
- BSONObj expected() { return BSON( "_id" << 0 ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() { return BSONObj(); }
- };
-
- /** Include 'a' field only. */
- class Include : public ExpectedResultBase {
- public:
- void prepareExpression() { expression()->includePath( "a" ); }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << 1 ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << true );
- }
- };
-
- /** Cannot include missing 'a' field. */
- class MissingInclude : public ExpectedResultBase {
- public:
- virtual BSONObj source() { return BSON( "_id" << 0 << "b" << 2 ); }
- void prepareExpression() { expression()->includePath( "a" ); }
- BSONObj expected() { return BSON( "_id" << 0 ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << true );
- }
- };
-
- /** Include '_id' field only. */
- class IncludeId : public ExpectedResultBase {
- public:
- void prepareExpression() { expression()->includePath( "_id" ); }
- BSONObj expected() { return BSON( "_id" << 0 ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "_id" << true );
- }
- };
-
- /** Exclude '_id' field. */
- class ExcludeId : public ExpectedResultBase {
- public:
- void prepareExpression() {
- expression()->includePath( "b" );
- expression()->excludeId( true );
- }
- BSONObj expected() { return BSON( "b" << 2 ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "b" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "_id" << false << "b" << true );
- }
- };
-
- /** Result order based on source document field order, not inclusion spec field order. */
- class SourceOrder : public ExpectedResultBase {
- public:
- void prepareExpression() {
- expression()->includePath( "b" );
- expression()->includePath( "a" );
- }
- BSONObj expected() { return source(); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a" << "b" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "b" << true << "a" << true );
- }
- };
-
- /** Include a nested field. */
- class IncludeNested : public ExpectedResultBase {
- public:
- void prepareExpression() { expression()->includePath( "a.b" ); }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSON( "b" << 5 ) ); }
- BSONObj source() {
- return BSON( "_id" << 0 << "a" << BSON( "b" << 5 << "c" << 6 ) << "z" << 2 );
- }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a.b" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "b" << true ) );
- }
- };
-
- /** Include two nested fields. */
- class IncludeTwoNested : public ExpectedResultBase {
- public:
- void prepareExpression() {
- expression()->includePath( "a.b" );
- expression()->includePath( "a.c" );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSON( "b" << 5 << "c" << 6 ) ); }
- BSONObj source() {
- return BSON( "_id" << 0 << "a" << BSON( "b" << 5 << "c" << 6 ) << "z" << 2 );
- }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a.b" << "a.c" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "b" << true << "c" << true ) );
- }
- };
-
- /** Include two fields nested within different parents. */
- class IncludeTwoParentNested : public ExpectedResultBase {
- public:
- void prepareExpression() {
- expression()->includePath( "a.b" );
- expression()->includePath( "c.d" );
- }
- BSONObj expected() {
- return BSON( "_id" << 0 << "a" << BSON( "b" << 5 ) << "c" << BSON( "d" << 6 ) );
- }
- BSONObj source() {
- return BSON( "_id" << 0 << "a" << BSON( "b" << 5 )
- << "c" << BSON( "d" << 6 ) << "z" << 2 );
- }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a.b" << "c.d" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "b" << true ) << "c" << BSON( "d" << true ) );
- }
- };
-
- /** Attempt to include a missing nested field. */
- class IncludeMissingNested : public ExpectedResultBase {
- public:
- void prepareExpression() { expression()->includePath( "a.b" ); }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSONObj() ); }
- BSONObj source() {
- return BSON( "_id" << 0 << "a" << BSON( "c" << 6 ) << "z" << 2 );
- }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a.b" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "b" << true ) );
- }
- };
-
- /** Attempt to include a nested field within a non object. */
- class IncludeNestedWithinNonObject : public ExpectedResultBase {
- public:
- void prepareExpression() { expression()->includePath( "a.b" ); }
- BSONObj expected() { return BSON( "_id" << 0 ); }
- BSONObj source() {
- return BSON( "_id" << 0 << "a" << 2 << "z" << 2 );
- }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a.b" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "b" << true ) );
- }
- };
-
- /** Include a nested field within an array. */
- class IncludeArrayNested : public ExpectedResultBase {
- public:
- void prepareExpression() { expression()->includePath( "a.b" ); }
- BSONObj expected() { return fromjson( "{_id:0,a:[{b:5},{b:2},{}]}" ); }
- BSONObj source() {
- return fromjson( "{_id:0,a:[{b:5,c:6},{b:2,c:9},{c:7},[],2],z:1}" );
- }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a.b" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "b" << true ) );
- }
- };
-
- /** Don't implicitly include a non-root '_id' field. */
- class ExcludeNonRootId : public ExpectedResultBase {
- public:
- virtual BSONObj source() {
- return BSON( "_id" << 0 << "a" << BSON( "_id" << 1 << "b" << 1 ) );
- }
- void prepareExpression() { expression()->includePath( "a.b" ); }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSON( "b" << 1 ) ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a.b" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "b" << true ) );
- }
- };
+class NoOptimizeBase : public OptimizeBase {
+ BSONObj expectedOptimized() {
+ return constify(spec());
+ }
+};
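+// Editor's note: OptimizeBase subclasses assert on the serialized form an
+// expression optimizes to, while NoOptimizeBase expects the (constified)
+// input back unchanged. A hypothetical subclass would look like:
+//
+//     class FoldsToTrue : public OptimizeBase {
+//         BSONObj spec() { return BSON("$and" << BSON_ARRAY(1)); }
+//         BSONObj expectedOptimized() { return BSON("$const" << true); }
+//     };
+//
+// which mirrors the OptimizeConstantExpression test in this namespace.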
- /** Project a computed expression. */
- class Computed : public ExpectedResultBase {
- public:
- virtual BSONObj source() {
- return BSON( "_id" << 0 );
- }
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value( 5 ) ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << 5 ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "$const" << 5 ) );
- }
- bool expectedIsSimple() { return false; }
- };
-
- /** Project a computed expression replacing an existing field. */
- class ComputedReplacement : public Computed {
- virtual BSONObj source() {
- return BSON( "_id" << 0 << "a" << 99 );
- }
- };
-
- /** An undefined value is passed through. */
- class ComputedUndefined : public ExpectedResultBase {
- public:
- virtual BSONObj source() {
- return BSON( "_id" << 0 );
- }
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value(BSONUndefined) ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSONUndefined); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() {
- return fromjson( "{a:{$const:undefined}}" );
- }
- bool expectedIsSimple() { return false; }
- };
-
- /** Project a computed expression replacing an existing field with Undefined. */
- class ComputedUndefinedReplacement : public ComputedUndefined {
- virtual BSONObj source() {
- return BSON( "_id" << 0 << "a" << 99 );
- }
- };
-
- /** A null value is projected. */
- class ComputedNull : public ExpectedResultBase {
- public:
- virtual BSONObj source() {
- return BSON( "_id" << 0 );
- }
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value(BSONNULL) ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSONNULL ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "$const" << BSONNULL ) );
- }
- bool expectedIsSimple() { return false; }
- };
-
- /** A nested value is projected. */
- class ComputedNested : public ExpectedResultBase {
- public:
- virtual BSONObj source() { return BSON( "_id" << 0 ); }
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a.b" ),
- ExpressionConstant::create( Value( 5 ) ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSON( "b" << 5 ) ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() {
- return BSON( "a" << BSON( "b" << BSON( "$const" << 5 ) ) );
- }
- bool expectedIsSimple() { return false; }
- };
-
- /** A field path is projected. */
- class ComputedFieldPath : public ExpectedResultBase {
- public:
- virtual BSONObj source() { return BSON( "_id" << 0 << "x" << 4 ); }
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a" ),
- ExpressionFieldPath::create( "x" ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << 4 ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "x" ); }
- BSONObj expectedBsonRepresentation() { return BSON( "a" << "$x" ); }
- bool expectedIsSimple() { return false; }
- };
-
- /** A nested field path is projected. */
- class ComputedNestedFieldPath : public ExpectedResultBase {
- public:
- virtual BSONObj source() { return BSON( "_id" << 0 << "x" << BSON( "y" << 4 ) ); }
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a.b" ),
- ExpressionFieldPath::create( "x.y" ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSON( "b" << 4 ) ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "x.y" ); }
- BSONObj expectedBsonRepresentation() { return BSON( "a" << BSON( "b" << "$x.y" ) ); }
- bool expectedIsSimple() { return false; }
- };
-
- /** An empty subobject expression for a missing field is not projected. */
- class EmptyNewSubobject : public ExpectedResultBase {
- public:
- virtual BSONObj source() {
- return BSON( "_id" << 0 );
- }
- void prepareExpression() {
- // Create a sub expression returning an empty object.
- intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
- subExpression->addField( mongo::FieldPath( "b" ),
- ExpressionFieldPath::create( "a.b" ) );
- expression()->addField( mongo::FieldPath( "a" ), subExpression );
- }
- BSONObj expected() { return BSON( "_id" << 0 ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" << "a.b"); }
- BSONObj expectedBsonRepresentation() {
- return fromjson( "{a:{b:'$a.b'}}" );
- }
- bool expectedIsSimple() { return false; }
- };
-
- /** A non empty subobject expression for a missing field is projected. */
- class NonEmptyNewSubobject : public ExpectedResultBase {
- public:
- virtual BSONObj source() {
- return BSON( "_id" << 0 );
- }
- void prepareExpression() {
-                // Create a sub expression returning a non empty object.
- intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
- subExpression->addField( mongo::FieldPath( "b" ),
- ExpressionConstant::create( Value( 6 ) ) );
- expression()->addField( mongo::FieldPath( "a" ), subExpression );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSON( "b" << 6 ) ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() {
- return fromjson( "{a:{b:{$const:6}}}" );
- }
- bool expectedIsSimple() { return false; }
- };
-
- /** Two computed fields within a common parent. */
- class AdjacentDottedComputedFields : public ExpectedResultBase {
- public:
- virtual BSONObj source() {
- return BSON( "_id" << 0 );
- }
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a.b" ),
- ExpressionConstant::create( Value( 6 ) ) );
- expression()->addField( mongo::FieldPath( "a.c" ),
- ExpressionConstant::create( Value( 7 ) ) );
- }
- BSONObj expected() { return BSON( "_id" << 0 << "a" << BSON( "b" << 6 << "c" << 7 ) ); }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() {
- return fromjson( "{a:{b:{$const:6},c:{$const:7}}}" );
- }
- bool expectedIsSimple() { return false; }
- };
-
- /** Two computed fields within a common parent, in one case dotted. */
- class AdjacentDottedAndNestedComputedFields : public AdjacentDottedComputedFields {
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a.b" ),
- ExpressionConstant::create( Value( 6 ) ) );
- intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
- subExpression->addField( mongo::FieldPath( "c" ),
- ExpressionConstant::create( Value( 7 ) ) );
- expression()->addField( mongo::FieldPath( "a" ), subExpression );
- }
- };
-
- /** Two computed fields within a common parent, in another case dotted. */
- class AdjacentNestedAndDottedComputedFields : public AdjacentDottedComputedFields {
- void prepareExpression() {
- intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
- subExpression->addField( mongo::FieldPath( "b" ),
- ExpressionConstant::create( Value( 6 ) ) );
- expression()->addField( mongo::FieldPath( "a" ), subExpression );
- expression()->addField( mongo::FieldPath( "a.c" ),
- ExpressionConstant::create( Value( 7 ) ) );
- }
- };
-
- /** Two computed fields within a common parent, nested rather than dotted. */
- class AdjacentNestedComputedFields : public AdjacentDottedComputedFields {
- void prepareExpression() {
- intrusive_ptr<ExpressionObject> firstSubExpression = ExpressionObject::create();
- firstSubExpression->addField( mongo::FieldPath( "b" ),
- ExpressionConstant::create( Value( 6 ) ) );
- expression()->addField( mongo::FieldPath( "a" ), firstSubExpression );
- intrusive_ptr<ExpressionObject> secondSubExpression = ExpressionObject::create();
- secondSubExpression->addField( mongo::FieldPath( "c" ),
- ExpressionConstant::create
- ( Value( 7 ) ) );
- expression()->addField( mongo::FieldPath( "a" ), secondSubExpression );
- }
- };
-
- /** Field ordering is preserved when nested fields are merged. */
- class AdjacentNestedOrdering : public ExpectedResultBase {
- public:
- virtual BSONObj source() {
- return BSON( "_id" << 0 );
- }
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a.b" ),
- ExpressionConstant::create( Value( 6 ) ) );
- intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
- // Add field 'd' then 'c'. Expect the same field ordering in the result doc.
- subExpression->addField( mongo::FieldPath( "d" ),
- ExpressionConstant::create( Value( 7 ) ) );
- subExpression->addField( mongo::FieldPath( "c" ),
- ExpressionConstant::create( Value( 8 ) ) );
- expression()->addField( mongo::FieldPath( "a" ), subExpression );
- }
- BSONObj expected() {
- return BSON( "_id" << 0 << "a" << BSON( "b" << 6 << "d" << 7 << "c" << 8 ) );
- }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() {
- return fromjson( "{a:{b:{$const:6},d:{$const:7},c:{$const:8}}}" );
- }
- bool expectedIsSimple() { return false; }
- };
-
- /** Adjacent fields two levels deep. */
- class MultipleNestedFields : public ExpectedResultBase {
- public:
- virtual BSONObj source() {
- return BSON( "_id" << 0 );
- }
- void prepareExpression() {
- expression()->addField( mongo::FieldPath( "a.b.c" ),
- ExpressionConstant::create( Value( 6 ) ) );
- intrusive_ptr<ExpressionObject> bSubExpression = ExpressionObject::create();
- bSubExpression->addField( mongo::FieldPath( "d" ),
- ExpressionConstant::create( Value( 7 ) ) );
- intrusive_ptr<ExpressionObject> aSubExpression = ExpressionObject::create();
- aSubExpression->addField( mongo::FieldPath( "b" ), bSubExpression );
- expression()->addField( mongo::FieldPath( "a" ), aSubExpression );
- }
- BSONObj expected() {
- return BSON( "_id" << 0 << "a" << BSON( "b" << BSON( "c" << 6 << "d" << 7 ) ) );
- }
- BSONArray expectedDependencies() { return BSON_ARRAY( "_id" ); }
- BSONObj expectedBsonRepresentation() {
- return fromjson( "{a:{b:{c:{$const:6},d:{$const:7}}}}" );
- }
- bool expectedIsSimple() { return false; }
- };
-
- /** Two expressions cannot generate the same field. */
- class ConflictingExpressionFields : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_THROWS( expression->addField( mongo::FieldPath( "a" ), // Duplicate field.
- ExpressionConstant::create
- ( Value( 6 ) ) ),
- UserException );
- }
- };
-
- /** An expression field conflicts with an inclusion field. */
- class ConflictingInclusionExpressionFields : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->includePath( "a" );
- ASSERT_THROWS( expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create
- ( Value( 6 ) ) ),
- UserException );
- }
- };
-
- /** An inclusion field conflicts with an expression field. */
- class ConflictingExpressionInclusionFields : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_THROWS( expression->includePath( "a" ),
- UserException );
- }
- };
-
- /** An object expression conflicts with a constant expression. */
- class ConflictingObjectConstantExpressionFields : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
- subExpression->includePath( "b" );
- expression->addField( mongo::FieldPath( "a" ), subExpression );
- ASSERT_THROWS( expression->addField( mongo::FieldPath( "a.b" ),
- ExpressionConstant::create
- ( Value( 6 ) ) ),
- UserException );
- }
- };
-
- /** A constant expression conflicts with an object expression. */
- class ConflictingConstantObjectExpressionFields : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a.b" ),
- ExpressionConstant::create( Value( 6 ) ) );
- intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
- subExpression->includePath( "b" );
- ASSERT_THROWS( expression->addField( mongo::FieldPath( "a" ), subExpression ),
- UserException );
- }
- };
-
- /** Two nested expressions cannot generate the same field. */
- class ConflictingNestedFields : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a.b" ),
- ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_THROWS( expression->addField( mongo::FieldPath( "a.b" ), // Duplicate field.
- ExpressionConstant::create
- ( Value( 6 ) ) ),
- UserException );
- }
- };
-
- /** An expression cannot be created for a subfield of another expression. */
- class ConflictingFieldAndSubfield : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_THROWS( expression->addField( mongo::FieldPath( "a.b" ),
- ExpressionConstant::create
- ( Value( 5 ) ) ),
- UserException );
- }
- };
-
- /** An expression cannot be created for a nested field of another expression. */
- class ConflictingFieldAndNestedField : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value( 5 ) ) );
- intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
- subExpression->addField( mongo::FieldPath( "b" ),
- ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_THROWS( expression->addField( mongo::FieldPath( "a" ), subExpression ),
- UserException );
- }
- };
-
- /** An expression cannot be created for a parent field of another expression. */
- class ConflictingSubfieldAndField : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a.b" ),
- ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_THROWS( expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create
- ( Value( 5 ) ) ),
- UserException );
- }
- };
-
- /** An expression cannot be created for a parent of a nested field. */
- class ConflictingNestedFieldAndField : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
- subExpression->addField( mongo::FieldPath( "b" ),
- ExpressionConstant::create( Value( 5 ) ) );
- expression->addField( mongo::FieldPath( "a" ), subExpression );
- ASSERT_THROWS( expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create
- ( Value( 5 ) ) ),
- UserException );
- }
- };
-
- /** Dependencies for non inclusion expressions. */
- class NonInclusionDependencies : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value( 5 ) ) );
- assertDependencies( BSON_ARRAY( "_id" ), expression, true );
- assertDependencies( BSONArray(), expression, false );
- expression->addField( mongo::FieldPath( "b" ),
- ExpressionFieldPath::create( "c.d" ) );
- assertDependencies( BSON_ARRAY( "_id" << "c.d" ), expression, true );
- assertDependencies( BSON_ARRAY( "c.d" ), expression, false );
- }
- };
-
- /** Dependencies for inclusion expressions. */
- class InclusionDependencies : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->includePath( "a" );
- assertDependencies( BSON_ARRAY( "_id" << "a" ), expression, true );
- DepsTracker unused;
- // 'path' must be provided for inclusion expressions.
- ASSERT_THROWS( expression->addDependencies( &unused ), UserException );
- }
- };
-
- /** Optimizing an object expression optimizes its sub expressions. */
- class Optimize : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- // Add inclusion.
- expression->includePath( "a" );
- // Add non inclusion.
- intrusive_ptr<Expression> andExpr = new ExpressionAnd();
- expression->addField( mongo::FieldPath( "b" ), andExpr );
- expression->optimize();
- // Optimizing 'expression' optimizes its non inclusion sub expressions, while
- // inclusion sub expressions are passed through.
- ASSERT_EQUALS( BSON( "a" << true << "b" << BSON( "$const" << true ) ),
- expressionToBson( expression ) );
- }
- };
-
- /** Serialize to a BSONObj. */
- class AddToBsonObj : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_EQUALS(constify(BSON("foo" << BSON("a" << 5))),
- BSON("foo" << expression->serialize(false)));
- }
- };
-
- /** Serialize to a BSONObj, with constants represented by expressions. */
- class AddToBsonObjRequireExpression : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value( 5 ) ) );
- ASSERT_EQUALS(BSON("foo" << BSON("a" << BSON("$const" << 5))),
- BSON("foo" << expression->serialize(false)));
- }
- };
-
- /** Serialize to a BSONArray. */
- class AddToBsonArray : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->addField( mongo::FieldPath( "a" ),
- ExpressionConstant::create( Value( 5 ) ) );
- BSONArrayBuilder bab;
- bab << expression->serialize(false);
- ASSERT_EQUALS( constify( BSON_ARRAY( BSON( "a" << 5 ) ) ), bab.arr() );
- }
- };
-
- /**
- * evaluate() does not supply an inclusion document. Inclusion spec'd fields are not
- * included. (Inclusion specs are not generally expected/allowed in cases where evaluate
- * is called instead of addToDocument.)
- */
- class Evaluate : public Base {
- public:
- void run() {
- intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
- expression->includePath( "a" );
- expression->addField( mongo::FieldPath( "b" ),
- ExpressionConstant::create( Value( 5 ) ) );
- expression->addField( mongo::FieldPath( "c" ),
- ExpressionFieldPath::create( "a" ) );
- ASSERT_EQUALS( BSON( "b" << 5 << "c" << 1 ),
- toBson( expression->evaluate
- ( fromBson
- ( BSON( "_id" << 0 << "a" << 1 ) ) ).getDocument() ) );
- }
- };
+/** $and without operands. */
+class NoOperands : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSONArray());
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
- } // namespace Object
+/** $and passed 'true'. */
+class True : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(true));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
- namespace Or {
-
- class ExpectedResultBase {
- public:
- virtual ~ExpectedResultBase() {
- }
- void run() {
- BSONObj specObject = BSON( "" << spec() );
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS( constify( spec() ), expressionToBson( expression ) );
- ASSERT_EQUALS( BSON( "" << expectedResult() ),
- toBson( expression->evaluate( fromBson( BSON( "a" << 1 ) ) ) ) );
- intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS( BSON( "" << expectedResult() ),
- toBson( optimized->evaluate( fromBson( BSON( "a" << 1 ) ) ) ) );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual bool expectedResult() = 0;
- };
-
- class OptimizeBase {
- public:
- virtual ~OptimizeBase() {
- }
- void run() {
- BSONObj specObject = BSON( "" << spec() );
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS( constify( spec() ), expressionToBson( expression ) );
- intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS( expectedOptimized(), expressionToBson( optimized ) );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual BSONObj expectedOptimized() = 0;
- };
-
- class NoOptimizeBase : public OptimizeBase {
- BSONObj expectedOptimized() { return constify( spec() ); }
- };
-
- /** $or without operands. */
- class NoOperands : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSONArray() ); }
- bool expectedResult() { return false; }
- };
-
- /** $or passed 'true'. */
- class True : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( true ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $or passed 'false'. */
- class False : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( false ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $or passed 'true', 'true'. */
- class TrueTrue : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( true << true ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $or passed 'true', 'false'. */
- class TrueFalse : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( true << false ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $or passed 'false', 'true'. */
- class FalseTrue : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( false << true ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $or passed 'false', 'false'. */
- class FalseFalse : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( false << false ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $or passed 'false', 'false', 'false'. */
- class FalseFalseFalse : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( false << false << false ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $or passed 'false', 'false', 'true'. */
- class FalseFalseTrue : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( false << false << true ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $or passed '0', '1'. */
- class ZeroOne : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( 0 << 1 ) ); }
- bool expectedResult() { return true; }
- };
-
- /** $or passed '0', 'false'. */
- class ZeroFalse : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( 0 << false ) ); }
- bool expectedResult() { return false; }
- };
-
- /** $or passed a field path. */
- class FieldPath : public ExpectedResultBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( "$a" ) ); }
- bool expectedResult() { return true; }
- };
-
- /** A constant expression is optimized to a constant. */
- class OptimizeConstantExpression : public OptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( 1 ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << true ); }
- };
-
- /** A non constant expression is not optimized. */
- class NonConstant : public NoOptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( "$a" ) ); }
- };
-
- /** An expression beginning with a single constant is optimized. */
- class ConstantNonConstantTrue : public OptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( 1 << "$a" ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << true ); }
- };
-
- /** An expression beginning with a single constant is optimized. */
- class ConstantNonConstantFalse : public OptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( 0 << "$a" ) ); }
- BSONObj expectedOptimized() { return BSON( "$and" << BSON_ARRAY("$a") ); }
- // note: using $and as serialization of ExpressionCoerceToBool rather than ExpressionAnd
- };
-
- /** An expression with a field path and '1'. */
- class NonConstantOne : public OptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( "$a" << 1 ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << true ); }
- };
-
- /** An expression with a field path and '0'. */
- class NonConstantZero : public OptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( "$a" << 0 ) ); }
- BSONObj expectedOptimized() { return BSON( "$and" << BSON_ARRAY( "$a" ) ); }
- };
-
- /** An expression with two field paths and '1'. */
- class NonConstantNonConstantOne : public OptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( "$a" << "$b" << 1 ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << true ); }
- };
-
- /** An expression with two field paths and '0'. */
- class NonConstantNonConstantZero : public OptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( "$a" << "$b" << 0 ) ); }
- BSONObj expectedOptimized() { return BSON( "$or" << BSON_ARRAY( "$a" << "$b" ) ); }
- };
-
- /** An expression with '0', '1', and a field path. */
- class ZeroOneNonConstant : public OptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( 0 << 1 << "$a" ) ); }
- BSONObj expectedOptimized() { return BSON( "$const" << true ); }
- };
-
- /** An expression with '0', '0', and a field path. */
- class ZeroZeroNonConstant : public OptimizeBase {
- BSONObj spec() { return BSON( "$or" << BSON_ARRAY( 0 << 0 << "$a" ) ); }
- BSONObj expectedOptimized() { return BSON( "$and" << BSON_ARRAY( "$a" ) ); }
- };
-
- /** Nested $or expressions. */
- class Nested : public OptimizeBase {
- BSONObj spec() {
- return BSON( "$or" <<
- BSON_ARRAY( 0 << BSON( "$or" << BSON_ARRAY( 0 ) ) << "$a" << "$b" ) );
- }
- BSONObj expectedOptimized() { return BSON( "$or" << BSON_ARRAY( "$a" << "$b" ) ); }
- };
-
-        /** Nested $or expressions containing a nested value evaluating to true. */
- class NestedOne : public OptimizeBase {
- BSONObj spec() {
- return BSON( "$or" <<
- BSON_ARRAY( 0 <<
- BSON( "$or" <<
- BSON_ARRAY( BSON( "$or" <<
- BSON_ARRAY( 1 ) ) ) ) <<
- "$a" << "$b" ) );
- }
- BSONObj expectedOptimized() { return BSON( "$const" << true ); }
- };
-
- } // namespace Or
-
- namespace Parse {
-
- namespace Object {
-
- class Base {
- public:
- virtual ~Base() {}
- void run() {
- BSONObj specObject = BSON( "" << spec() );
- BSONElement specElement = specObject.firstElement();
- Expression::ObjectCtx context = objectCtx();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression =
- Expression::parseObject( specElement.Obj(), &context, vps );
- ASSERT_EQUALS( expectedBson(), expressionToBson( expression ) );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual Expression::ObjectCtx objectCtx() {
- return Expression::ObjectCtx( Expression::ObjectCtx::DOCUMENT_OK );
- }
- virtual BSONObj expectedBson() { return constify( spec() ); }
- };
-
- class ParseError {
- public:
- virtual ~ParseError() {}
- void run() {
- BSONObj specObject = BSON( "" << spec() );
- BSONElement specElement = specObject.firstElement();
- Expression::ObjectCtx context = objectCtx();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- ASSERT_THROWS( Expression::parseObject( specElement.Obj(), &context, vps ),
- UserException );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual Expression::ObjectCtx objectCtx() {
- return Expression::ObjectCtx( Expression::ObjectCtx::DOCUMENT_OK );
- }
- };
-
- /** The spec must be an object. */
- class NonObject {
- public:
- void run() {
- BSONObj specObject = BSON( "" << 1 );
- BSONElement specElement = specObject.firstElement();
- Expression::ObjectCtx context =
- Expression::ObjectCtx( Expression::ObjectCtx::DOCUMENT_OK );
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- ASSERT_THROWS( Expression::parseObject( specElement.Obj(), &context, vps ),
- UserException );
- }
- };
-
- /** Empty object. */
- class Empty : public Base {
- BSONObj spec() { return BSONObj(); }
- };
-
- /** Operator spec object. */
- class Operator : public Base {
- BSONObj spec() { return BSON( "$and" << BSONArray() ); }
- };
-
- /** Invalid operator not allowed. */
- class InvalidOperator : public ParseError {
- BSONObj spec() { return BSON( "$invalid" << 1 ); }
- };
-
- /** Two operators not allowed. */
- class TwoOperators : public ParseError {
- BSONObj spec() { return BSON( "$and" << BSONArray() << "$or" << BSONArray() ); }
- };
-
- /** An operator must be the first and only field. */
- class OperatorLaterField : public ParseError {
- BSONObj spec() {
- return BSON( "a" << BSON( "$and" << BSONArray() ) << "$or" << BSONArray() );
- }
- };
-
- /** An operator must be the first and only field. */
- class OperatorAndOtherField : public ParseError {
- BSONObj spec() {
- return BSON( "$and" << BSONArray() << "a" << BSON( "$or" << BSONArray() ) );
- }
- };
-
- /** Operators not allowed at the top level of a projection. */
- class OperatorTopLevel : public ParseError {
- BSONObj spec() { return BSON( "$and" << BSONArray() ); }
- Expression::ObjectCtx objectCtx() {
- return Expression::ObjectCtx( Expression::ObjectCtx::DOCUMENT_OK |
- Expression::ObjectCtx::TOP_LEVEL );
- }
- };
-
- /** Dotted fields are not generally allowed. */
- class Dotted : public ParseError {
- BSONObj spec() { return BSON( "a.b" << BSON( "$and" << BSONArray() ) ); }
- };
-
- /** Dotted fields are allowed at the top level. */
- class DottedTopLevel : public Base {
- BSONObj spec() { return BSON( "a.b" << BSON( "$and" << BSONArray() ) ); }
- Expression::ObjectCtx objectCtx() {
- return Expression::ObjectCtx( Expression::ObjectCtx::DOCUMENT_OK |
- Expression::ObjectCtx::TOP_LEVEL );
- }
- BSONObj expectedBson() {
- return BSON( "a" << BSON( "b" << BSON( "$and" << BSONArray() ) ) );
- }
- };
-
- /** Nested spec. */
- class Nested : public Base {
- BSONObj spec() { return BSON( "a" << BSON( "$and" << BSONArray() ) ); }
- };
-
- /** Parse error in nested document. */
- class NestedParseError : public ParseError {
- BSONObj spec() {
- return BSON( "a" << BSON( "$and" << BSONArray() << "$or" << BSONArray() ) );
- }
- };
-
- /** FieldPath expression. */
- class FieldPath : public Base {
- BSONObj spec() { return BSON( "a" << "$field" ); }
- };
-
- /** Invalid FieldPath expression. */
- class InvalidFieldPath : public ParseError {
- BSONObj spec() { return BSON( "a" << "$field." ); }
- };
-
- /** Non FieldPath string. */
- class NonFieldPathString : public ParseError {
- BSONObj spec() { return BSON( "a" << "foo" ); }
- };
-
- /** Inclusion spec not allowed. */
- class DisallowedInclusion : public ParseError {
- BSONObj spec() { return BSON( "a" << 1 ); }
- };
-
- class InclusionBase : public Base {
- Expression::ObjectCtx objectCtx() {
- return Expression::ObjectCtx( Expression::ObjectCtx::DOCUMENT_OK |
- Expression::ObjectCtx::INCLUSION_OK );
- }
- BSONObj expectedBson() { return BSON( "a" << true ); }
- };
-
- /** Inclusion with bool type. */
- class InclusionBool : public InclusionBase {
- BSONObj spec() { return BSON( "a" << true ); }
- };
-
- /** Inclusion with double type. */
- class InclusionDouble : public InclusionBase {
- BSONObj spec() { return BSON( "a" << 1.0 ); }
- };
-
- /** Inclusion with int type. */
- class InclusionInt : public InclusionBase {
- BSONObj spec() { return BSON( "a" << 1 ); }
- };
-
- /** Inclusion with long type. */
- class InclusionLong : public InclusionBase {
- BSONObj spec() { return BSON( "a" << 1LL ); }
- };
-
- /** Inclusion of a nested field. */
- class NestedInclusion : public InclusionBase {
- BSONObj spec() { return BSON( "a" << BSON( "b" << true ) ); }
- BSONObj expectedBson() { return spec(); }
- };
-
- /** Exclude _id. */
- class ExcludeId : public Base {
- BSONObj spec() { return BSON( "_id" << 0 ); }
- Expression::ObjectCtx objectCtx() {
- return Expression::ObjectCtx( Expression::ObjectCtx::DOCUMENT_OK |
- Expression::ObjectCtx::TOP_LEVEL );
- }
- BSONObj expectedBson() { return BSON( "_id" << false ); }
- };
-
- /** Excluding non _id field not allowed. */
- class ExcludeNonId : public ParseError {
- BSONObj spec() { return BSON( "a" << 0 ); }
- };
-
- /** Excluding _id not top level. */
- class ExcludeIdNotTopLevel : public ParseError {
- BSONObj spec() { return BSON( "_id" << 0 ); }
- };
-
- /** Invalid value type. */
- class InvalidType : public ParseError {
- BSONObj spec() { return BSON( "a" << BSONNULL ); }
- };
-
- } // namespace Object
-
- namespace Expression {
-
- using mongo::Expression;
-
- class Base {
- public:
- virtual ~Base() {}
- void run() {
- BSONObj specObject = spec();
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseExpression(specElement,
- vps);
- ASSERT_EQUALS( constify( expectedBson() ), expressionToBson( expression ) );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual BSONObj expectedBson() { return constify( spec() ); }
- };
-
- class ParseError {
- public:
- virtual ~ParseError() {}
- void run() {
- BSONObj specObject = spec();
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- ASSERT_THROWS(Expression::parseExpression(specElement, vps), UserException);
- }
- protected:
- virtual BSONObj spec() = 0;
- };
-
- /** A constant expression. */
- class Const : public Base {
- BSONObj spec() { return BSON( "$const" << 5 ); }
- };
-
- /** An expression with an invalid name. */
- class InvalidName : public ParseError {
- BSONObj spec() { return BSON( "$invalid" << 1 ); }
- };
-
- /** An expression requiring an array that is not provided with an array. */
- class RequiredArrayMissing : public ParseError {
- BSONObj spec() { return BSON( "$strcasecmp" << "foo" ); }
- };
-
- /** An expression with the wrong number of operands. */
- class IncorrectOperandCount : public ParseError {
- BSONObj spec() { return BSON( "$strcasecmp" << BSON_ARRAY( "foo" ) ); }
- };
-
- /** An expression with the correct number of operands. */
- class CorrectOperandCount : public Base {
- BSONObj spec() { return BSON( "$strcasecmp" << BSON_ARRAY( "foo" << "FOO" ) ); }
- };
-
-            /** A variable argument expression with zero operands. */
- class ZeroOperands : public Base {
- BSONObj spec() { return BSON( "$and" << BSONArray() ); }
- };
-
-            /** A variable argument expression with one operand. */
- class OneOperand : public Base {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( 1 ) ); }
- };
-
-            /** A variable argument expression with two operands. */
- class TwoOperands : public Base {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( 1 << 2 ) ); }
- };
-
-            /** A variable argument expression with a singleton operand. */
- class SingletonOperandVariable : public Base {
- BSONObj spec() { return BSON( "$and" << 1 ); }
- BSONObj expectedBson() { return BSON( "$and" << BSON_ARRAY( 1 ) ); }
- };
-
-            /** A fixed argument expression with a singleton operand. */
- class SingletonOperandFixed : public Base {
- BSONObj spec() { return BSON( "$not" << 1 ); }
- BSONObj expectedBson() { return BSON( "$not" << BSON_ARRAY( 1 ) ); }
- };
-
- /** An object can be provided as a singleton argument. */
- class ObjectSingleton : public Base {
- BSONObj spec() { return BSON( "$and" << BSON( "$const" << 1 ) ); }
- BSONObj expectedBson() { return BSON("$and" << BSON_ARRAY(BSON("$const" << 1))); }
- };
-
-            /** An object can be provided as an array argument. */
- class ObjectOperand : public Base {
- BSONObj spec() { return BSON( "$and" << BSON_ARRAY( BSON( "$const" << 1 ) ) ); }
- BSONObj expectedBson() { return BSON( "$and" << BSON_ARRAY( 1 ) ); }
- };
-
- } // namespace Expression
-
- namespace Operand {
-
- class Base {
- public:
- virtual ~Base() {}
- void run() {
- BSONObj specObject = spec();
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<mongo::Expression> expression =
- mongo::Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS( expectedBson(), expressionToBson( expression ) );
- }
- protected:
- virtual BSONObj spec() = 0;
- virtual BSONObj expectedBson() { return constify( spec() ); }
- };
-
- class ParseError {
- public:
- virtual ~ParseError() {}
- void run() {
- BSONObj specObject = spec();
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- ASSERT_THROWS(mongo::Expression::parseOperand(specElement, vps), UserException);
- }
- protected:
- virtual BSONObj spec() = 0;
- };
-
- /** A field path operand. */
- class FieldPath {
- public:
- void run() {
- BSONObj specObject = BSON( "" << "$field" );
- BSONElement specElement = specObject.firstElement();
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<mongo::Expression> expression =
- mongo::Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS(specObject, BSON("" << expression->serialize(false)));
- }
- };
-
- /** A string constant (not field path) operand. */
- class NonFieldPathString : public Base {
- BSONObj spec() { return BSON( "" << "foo" ); }
- BSONObj expectedBson() { return BSON( "$const" << "foo" ); }
- };
-
- /** An object operand. */
- class Object : public Base {
- BSONObj spec() { return BSON( "" << BSON( "$and" << BSONArray() ) ); }
- BSONObj expectedBson() { return BSON( "$and" << BSONArray() ); }
- };
-
- /** An inclusion operand. */
- class InclusionObject : public ParseError {
- BSONObj spec() { return BSON( "" << BSON( "a" << 1 ) ); }
- };
-
- /** A constant operand. */
- class Constant : public Base {
- BSONObj spec() { return BSON( "" << 5 ); }
- BSONObj expectedBson() { return BSON( "$const" << 5 ); }
- };
-
- } // namespace Operand
-
- } // namespace Parse
-
- namespace Set {
- Value sortSet(Value set) {
- if (set.nullish()) {
- return Value(BSONNULL);
- }
- vector<Value> sortedSet = set.getArray();
- std::sort(sortedSet.begin(), sortedSet.end());
- return Value(sortedSet);
+/** $and passed 'false'. */
+class False : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(false));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $and passed 'true', 'true'. */
+class TrueTrue : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(true << true));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** $and passed 'true', 'false'. */
+class TrueFalse : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(true << false));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $and passed 'false', 'true'. */
+class FalseTrue : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(false << true));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $and passed 'false', 'false'. */
+class FalseFalse : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(false << false));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $and passed 'true', 'true', 'true'. */
+class TrueTrueTrue : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(true << true << true));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** $and passed 'true', 'true', 'false'. */
+class TrueTrueFalse : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(true << true << false));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $and passed '0', '1'. */
+class ZeroOne : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(0 << 1));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $and passed '1', '2'. */
+class OneTwo : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(1 << 2));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** $and passed a field path. */
+class FieldPath : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY("$a"));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** A constant expression is optimized to a constant. */
+class OptimizeConstantExpression : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(1));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << true);
+ }
+};
+
+/** A non constant expression is not optimized. */
+class NonConstant : public NoOptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY("$a"));
+ }
+};
+
+/** An expression beginning with a single constant is optimized. */
+class ConstantNonConstantTrue : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(1 << "$a"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$and" << BSON_ARRAY("$a"));
+ }
+ // note: using $and as serialization of ExpressionCoerceToBool rather than ExpressionAnd
+};
+
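+/** An expression beginning with a single constant is optimized. */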
+class ConstantNonConstantFalse : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(0 << "$a"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << false);
+ }
+};
+
+/** An expression with a field path and '1'. */
+class NonConstantOne : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY("$a" << 1));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$and" << BSON_ARRAY("$a"));
+ }
+};
+
+/** An expression with a field path and '0'. */
+class NonConstantZero : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY("$a" << 0));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << false);
+ }
+};
+
+/** An expression with two field paths and '1'. */
+class NonConstantNonConstantOne : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY("$a"
+ << "$b" << 1));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$and" << BSON_ARRAY("$a"
+ << "$b"));
+ }
+};
+
+/** An expression with two field paths and '0'. */
+class NonConstantNonConstantZero : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY("$a"
+ << "$b" << 0));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << false);
+ }
+};
+
+/** An expression with '0', '1', and a field path. */
+class ZeroOneNonConstant : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(0 << 1 << "$a"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << false);
+ }
+};
+
+/** An expression with '1', '1', and a field path. */
+class OneOneNonConstant : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(1 << 1 << "$a"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$and" << BSON_ARRAY("$a"));
+ }
+};
+
+/** Nested $and expressions. */
+class Nested : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(1 << BSON("$and" << BSON_ARRAY(1)) << "$a"
+ << "$b"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$and" << BSON_ARRAY("$a"
+ << "$b"));
+ }
+};
+
+/** Nested $and expressions containing a nested value evaluating to false. */
+class NestedZero : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(
+ 1 << BSON("$and" << BSON_ARRAY(BSON("$and" << BSON_ARRAY(0)))) << "$a"
+ << "$b"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << false);
+ }
+};
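+
+// Taken together, these cases outline the $and optimization: constant
+// operands are folded, a constant operand evaluating to false collapses the
+// whole expression to {$const: false}, constants evaluating to true are
+// dropped, nested $and operands are flattened, and a single surviving
+// operand is wrapped in ExpressionCoerceToBool (serialized as $and, per the
+// note on ConstantNonConstantTrue above).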
+
+} // namespace And
+
+namespace CoerceToBool {
+
+/** Nested expression coerced to true. */
+class EvaluateTrue {
+public:
+ void run() {
+ intrusive_ptr<Expression> nested = ExpressionConstant::create(Value(5));
+ intrusive_ptr<Expression> expression = ExpressionCoerceToBool::create(nested);
+ ASSERT(expression->evaluate(Document()).getBool());
+ }
+};
+
+/** Nested expression coerced to false. */
+class EvaluateFalse {
+public:
+ void run() {
+ intrusive_ptr<Expression> nested = ExpressionConstant::create(Value(0));
+ intrusive_ptr<Expression> expression = ExpressionCoerceToBool::create(nested);
+ ASSERT(!expression->evaluate(Document()).getBool());
+ }
+};
+
+/** Dependencies forwarded from nested expression. */
+class Dependencies {
+public:
+ void run() {
+ intrusive_ptr<Expression> nested = ExpressionFieldPath::create("a.b");
+ intrusive_ptr<Expression> expression = ExpressionCoerceToBool::create(nested);
+ DepsTracker dependencies;
+ expression->addDependencies(&dependencies);
+ ASSERT_EQUALS(1U, dependencies.fields.size());
+ ASSERT_EQUALS(1U, dependencies.fields.count("a.b"));
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(false, dependencies.needTextScore);
+ }
+};
+
+/** Output to BSONObj. */
+class AddToBsonObj {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression =
+ ExpressionCoerceToBool::create(ExpressionFieldPath::create("foo"));
+
+ // serialized as $and because CoerceToBool isn't an ExpressionNary
+ assertBinaryEqual(fromjson("{field:{$and:['$foo']}}"), toBsonObj(expression));
+ }
+
+private:
+ static BSONObj toBsonObj(const intrusive_ptr<Expression>& expression) {
+ return BSON("field" << expression->serialize(false));
+ }
+};
+
+/** Output to BSONArray. */
+class AddToBsonArray {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression =
+ ExpressionCoerceToBool::create(ExpressionFieldPath::create("foo"));
+
+ // serialized as $and because CoerceToBool isn't an ExpressionNary
+ assertBinaryEqual(BSON_ARRAY(fromjson("{$and:['$foo']}")), toBsonArray(expression));
+ }
+
+private:
+ static BSONArray toBsonArray(const intrusive_ptr<Expression>& expression) {
+ BSONArrayBuilder bab;
+ bab << expression->serialize(false);
+ return bab.arr();
+ }
+};
+
+
+// TODO Test optimize(), difficult because a CoerceToBool cannot be output as BSON.
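+//
+// A minimal sketch of a possible workaround (hypothetical, not part of the
+// suite): check that optimize() at least preserves evaluation behavior,
+// since the optimized expression cannot be round-tripped through BSON for
+// comparison.
+//
+// class OptimizePreservesEvaluation {
+// public:
+//     void run() {
+//         intrusive_ptr<Expression> expression =
+//             ExpressionCoerceToBool::create(ExpressionConstant::create(Value(5)));
+//         intrusive_ptr<Expression> optimized = expression->optimize();
+//         ASSERT(optimized->evaluate(Document()).getBool());
+//     }
+// };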
+
+} // namespace CoerceToBool
+
+namespace Compare {
+
+class OptimizeBase {
+public:
+ virtual ~OptimizeBase() {}
+ void run() {
+ BSONObj specObject = BSON("" << spec());
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ intrusive_ptr<Expression> optimized = expression->optimize();
+ ASSERT_EQUALS(constify(expectedOptimized()), expressionToBson(optimized));
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+ virtual BSONObj expectedOptimized() = 0;
+};
+
+class FieldRangeOptimize : public OptimizeBase {
+ BSONObj expectedOptimized() {
+ return spec();
+ }
+};
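+
+// Both FieldRangeOptimize and NoOptimize assert that the optimized
+// expression serializes back to the original spec; the distinct names
+// presumably document intent (field/constant comparisons take the
+// field-range path, while the NoOptimize cases are left untouched).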
+
+class NoOptimize : public OptimizeBase {
+ BSONObj expectedOptimized() {
+ return spec();
+ }
+};
+
+/** Check expected result for expressions depending on constants. */
+class ExpectedResultBase : public OptimizeBase {
+public:
+ void run() {
+ OptimizeBase::run();
+ BSONObj specObject = BSON("" << spec());
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ // Check expression spec round trip.
+ ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ // Check evaluation result.
+ ASSERT_EQUALS(expectedResult(), toBson(expression->evaluate(Document())));
+ // Check that the result is the same after optimizing.
+ intrusive_ptr<Expression> optimized = expression->optimize();
+ ASSERT_EQUALS(expectedResult(), toBson(optimized->evaluate(Document())));
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+ virtual BSONObj expectedResult() = 0;
+
+private:
+ virtual BSONObj expectedOptimized() {
+ return BSON("$const" << expectedResult().firstElement());
+ }
+};
+
+class ExpectedTrue : public ExpectedResultBase {
+ BSONObj expectedResult() {
+ return BSON("" << true);
+ }
+};
+
+class ExpectedFalse : public ExpectedResultBase {
+ BSONObj expectedResult() {
+ return BSON("" << false);
+ }
+};
+
+class ParseError {
+public:
+ virtual ~ParseError() {}
+ void run() {
+ BSONObj specObject = BSON("" << spec());
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ ASSERT_THROWS(Expression::parseOperand(specElement, vps), UserException);
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+};
+
+/** $eq with first < second. */
+class EqLt : public ExpectedFalse {
+ BSONObj spec() {
+ return BSON("$eq" << BSON_ARRAY(1 << 2));
+ }
+};
+
+/** $eq with first == second. */
+class EqEq : public ExpectedTrue {
+ BSONObj spec() {
+ return BSON("$eq" << BSON_ARRAY(1 << 1));
+ }
+};
+
+/** $eq with first > second. */
+class EqGt : public ExpectedFalse {
+ BSONObj spec() {
+ return BSON("$eq" << BSON_ARRAY(1 << 0));
+ }
+};
+
+/** $ne with first < second. */
+class NeLt : public ExpectedTrue {
+ BSONObj spec() {
+ return BSON("$ne" << BSON_ARRAY(1 << 2));
+ }
+};
+
+/** $ne with first == second. */
+class NeEq : public ExpectedFalse {
+ BSONObj spec() {
+ return BSON("$ne" << BSON_ARRAY(1 << 1));
+ }
+};
+
+/** $ne with first > second. */
+class NeGt : public ExpectedTrue {
+ BSONObj spec() {
+ return BSON("$ne" << BSON_ARRAY(1 << 0));
+ }
+};
+
+/** $gt with first < second. */
+class GtLt : public ExpectedFalse {
+ BSONObj spec() {
+ return BSON("$gt" << BSON_ARRAY(1 << 2));
+ }
+};
+
+/** $gt with first == second. */
+class GtEq : public ExpectedFalse {
+ BSONObj spec() {
+ return BSON("$gt" << BSON_ARRAY(1 << 1));
+ }
+};
+
+/** $gt with first > second. */
+class GtGt : public ExpectedTrue {
+ BSONObj spec() {
+ return BSON("$gt" << BSON_ARRAY(1 << 0));
+ }
+};
+
+/** $gte with first < second. */
+class GteLt : public ExpectedFalse {
+ BSONObj spec() {
+ return BSON("$gte" << BSON_ARRAY(1 << 2));
+ }
+};
+
+/** $gte with first == second. */
+class GteEq : public ExpectedTrue {
+ BSONObj spec() {
+ return BSON("$gte" << BSON_ARRAY(1 << 1));
+ }
+};
+
+/** $gte with first > second. */
+class GteGt : public ExpectedTrue {
+ BSONObj spec() {
+ return BSON("$gte" << BSON_ARRAY(1 << 0));
+ }
+};
+
+/** $lt with first < second. */
+class LtLt : public ExpectedTrue {
+ BSONObj spec() {
+ return BSON("$lt" << BSON_ARRAY(1 << 2));
+ }
+};
+
+/** $lt with first == second. */
+class LtEq : public ExpectedFalse {
+ BSONObj spec() {
+ return BSON("$lt" << BSON_ARRAY(1 << 1));
+ }
+};
+
+/** $lt with first > second. */
+class LtGt : public ExpectedFalse {
+ BSONObj spec() {
+ return BSON("$lt" << BSON_ARRAY(1 << 0));
+ }
+};
+
+/** $lte with first < second. */
+class LteLt : public ExpectedTrue {
+ BSONObj spec() {
+ return BSON("$lte" << BSON_ARRAY(1 << 2));
+ }
+};
+
+/** $lte with first == second. */
+class LteEq : public ExpectedTrue {
+ BSONObj spec() {
+ return BSON("$lte" << BSON_ARRAY(1 << 1));
+ }
+};
+
+/** $lte with first > second. */
+class LteGt : public ExpectedFalse {
+ BSONObj spec() {
+ return BSON("$lte" << BSON_ARRAY(1 << 0));
+ }
+};
+
+/** $cmp with first < second. */
+class CmpLt : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$cmp" << BSON_ARRAY(1 << 2));
+ }
+ BSONObj expectedResult() {
+ return BSON("" << -1);
+ }
+};
+
+/** $cmp with first == second. */
+class CmpEq : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$cmp" << BSON_ARRAY(1 << 1));
+ }
+ BSONObj expectedResult() {
+ return BSON("" << 0);
+ }
+};
+
+/** $cmp with first > second. */
+class CmpGt : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$cmp" << BSON_ARRAY(1 << 0));
+ }
+ BSONObj expectedResult() {
+ return BSON("" << 1);
+ }
+};
+
+/** $cmp results are bracketed to an absolute value of 1. */
+class CmpBracketed : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$cmp" << BSON_ARRAY("z"
+ << "a"));
+ }
+ BSONObj expectedResult() {
+ return BSON("" << 1);
+ }
+};
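+
+// That is, $cmp is clamped to -1, 0, or 1: comparing "z" with "a" yields 1
+// rather than any larger positive value.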
+
+/** Zero operands provided. */
+class ZeroOperands : public ParseError {
+ BSONObj spec() {
+ return BSON("$ne" << BSONArray());
+ }
+};
+
+/** One operand provided. */
+class OneOperand : public ParseError {
+ BSONObj spec() {
+ return BSON("$eq" << BSON_ARRAY(1));
+ }
+};
+
+/** Three operands provided. */
+class ThreeOperands : public ParseError {
+ BSONObj spec() {
+ return BSON("$gt" << BSON_ARRAY(2 << 3 << 4));
+ }
+};
+
+/** Incompatible types can be compared. */
+class IncompatibleTypes {
+public:
+ void run() {
+ BSONObj specObject = BSON("" << BSON("$ne" << BSON_ARRAY("a" << 1)));
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(expression->evaluate(Document()), Value(true));
+ }
+};
+
+/**
+ * An expression depending on constants is optimized to a constant via
+ * ExpressionNary::optimize().
+ */
+class OptimizeConstants : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$eq" << BSON_ARRAY(1 << 1));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << true);
+ }
+};
+
+/** $cmp is not optimized. */
+class NoOptimizeCmp : public NoOptimize {
+ BSONObj spec() {
+ return BSON("$cmp" << BSON_ARRAY(1 << "$a"));
+ }
+};
+
+/** $ne is not optimized. */
+class NoOptimizeNe : public NoOptimize {
+ BSONObj spec() {
+ return BSON("$ne" << BSON_ARRAY(1 << "$a"));
+ }
+};
+
+/** No optimization is performed without a constant. */
+class NoOptimizeNoConstant : public NoOptimize {
+ BSONObj spec() {
+ return BSON("$ne" << BSON_ARRAY("$a"
+ << "$b"));
+ }
+};
+
+/** No optimization is performed without an immediate field path. */
+class NoOptimizeWithoutFieldPath : public NoOptimize {
+ BSONObj spec() {
+ return BSON("$eq" << BSON_ARRAY(BSON("$and" << BSON_ARRAY("$a")) << 1));
+ }
+};
+
+/** No optimization is performed without an immediate field path. */
+class NoOptimizeWithoutFieldPathReverse : public NoOptimize {
+ BSONObj spec() {
+ return BSON("$eq" << BSON_ARRAY(1 << BSON("$and" << BSON_ARRAY("$a"))));
+ }
+};
+
+/** An equality expression is optimized. */
+class OptimizeEq : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$eq" << BSON_ARRAY("$a" << 1));
+ }
+};
+
+/** A reverse sense equality expression is optimized. */
+class OptimizeEqReverse : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$eq" << BSON_ARRAY(1 << "$a"));
+ }
+};
+
+/** A $lt expression is optimized. */
+class OptimizeLt : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$lt" << BSON_ARRAY("$a" << 1));
+ }
+};
+
+/** A reverse sense $lt expression is optimized. */
+class OptimizeLtReverse : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$lt" << BSON_ARRAY(1 << "$a"));
+ }
+};
+
+/** A $lte expression is optimized. */
+class OptimizeLte : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$lte" << BSON_ARRAY("$b" << 2));
+ }
+};
+
+/** A reverse sense $lte expression is optimized. */
+class OptimizeLteReverse : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$lte" << BSON_ARRAY(2 << "$b"));
+ }
+};
+
+/** A $gt expression is optimized. */
+class OptimizeGt : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$gt" << BSON_ARRAY("$b" << 2));
+ }
+};
+
+/** A reverse sense $gt expression is optimized. */
+class OptimizeGtReverse : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$gt" << BSON_ARRAY(2 << "$b"));
+ }
+};
+
+/** A $gte expression is optimized. */
+class OptimizeGte : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$gte" << BSON_ARRAY("$b" << 2));
+ }
+};
+
+/** A reverse sense $gte expression is optimized. */
+class OptimizeGteReverse : public FieldRangeOptimize {
+ BSONObj spec() {
+ return BSON("$gte" << BSON_ARRAY(2 << "$b"));
+ }
+};
+
+} // namespace Compare
+
+namespace Constant {
+
+/** Create an ExpressionConstant from a Value. */
+class Create {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionConstant::create(Value(5));
+ assertBinaryEqual(BSON("" << 5), toBson(expression->evaluate(Document())));
+ }
+};
+
+/** Create an ExpressionConstant from a BsonElement. */
+class CreateFromBsonElement {
+public:
+ void run() {
+ BSONObj spec = BSON("IGNORED_FIELD_NAME"
+ << "foo");
+ BSONElement specElement = spec.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = ExpressionConstant::parse(specElement, vps);
+ assertBinaryEqual(BSON(""
+ << "foo"),
+ toBson(expression->evaluate(Document())));
+ }
+};
+
+/** No optimization is performed. */
+class Optimize {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionConstant::create(Value(5));
+ // An attempt to optimize returns the Expression itself.
+ ASSERT_EQUALS(expression, expression->optimize());
+ }
+};
+
+/** No dependencies. */
+class Dependencies {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionConstant::create(Value(5));
+ DepsTracker dependencies;
+ expression->addDependencies(&dependencies);
+ ASSERT_EQUALS(0U, dependencies.fields.size());
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(false, dependencies.needTextScore);
+ }
+};
+
+/** Output to BSONObj. */
+class AddToBsonObj {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionConstant::create(Value(5));
+ // The constant is replaced with a $ expression.
+ assertBinaryEqual(BSON("field" << BSON("$const" << 5)), toBsonObj(expression));
+ }
+
+private:
+ static BSONObj toBsonObj(const intrusive_ptr<Expression>& expression) {
+ return BSON("field" << expression->serialize(false));
+ }
+};
+
+/** Output to BSONArray. */
+class AddToBsonArray {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionConstant::create(Value(5));
+ // The constant is copied out as is.
+ assertBinaryEqual(constify(BSON_ARRAY(5)), toBsonArray(expression));
+ }
+
+private:
+ static BSONObj toBsonArray(const intrusive_ptr<Expression>& expression) {
+ BSONArrayBuilder bab;
+ bab << expression->serialize(false);
+ return bab.obj();
+ }
+};
+
+} // namespace Constant
+
+namespace FieldPath {
+
+/** The provided field path does not pass validation. */
+class Invalid {
+public:
+ void run() {
+ ASSERT_THROWS(ExpressionFieldPath::create(""), UserException);
+ }
+};
+
+/** No optimization is performed. */
+class Optimize {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a");
+ // An attempt to optimize returns the Expression itself.
+ ASSERT_EQUALS(expression, expression->optimize());
+ }
+};
+
+/** The field path itself is a dependency. */
+class Dependencies {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ DepsTracker dependencies;
+ expression->addDependencies(&dependencies);
+ ASSERT_EQUALS(1U, dependencies.fields.size());
+ ASSERT_EQUALS(1U, dependencies.fields.count("a.b"));
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(false, dependencies.needTextScore);
+ }
+};
+
+/** Field path target field is missing. */
+class Missing {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a");
+ assertBinaryEqual(fromjson("{}"), toBson(expression->evaluate(Document())));
+ }
+};
+
+/** Simple case where the target field is present. */
+class Present {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a");
+ assertBinaryEqual(fromjson("{'':123}"),
+ toBson(expression->evaluate(fromBson(BSON("a" << 123)))));
+ }
+};
+
+/** Target field parent is null. */
+class NestedBelowNull {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{}"),
+ toBson(expression->evaluate(fromBson(fromjson("{a:null}")))));
+ }
+};
+
+/** Target field parent is undefined. */
+class NestedBelowUndefined {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{}"),
+ toBson(expression->evaluate(fromBson(fromjson("{a:undefined}")))));
+ }
+};
+
+/** Target field parent is missing. */
+class NestedBelowMissing {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{}"),
+ toBson(expression->evaluate(fromBson(fromjson("{z:1}")))));
+ }
+};
+
+/** Target field parent is an integer. */
+class NestedBelowInt {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{}"), toBson(expression->evaluate(fromBson(BSON("a" << 2)))));
+ }
+};
+
+/** A value in a nested object. */
+class NestedValue {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(BSON("" << 55),
+ toBson(expression->evaluate(fromBson(BSON("a" << BSON("b" << 55))))));
+ }
+};
+
+/** Target field within an empty object. */
+class NestedBelowEmptyObject {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{}"),
+ toBson(expression->evaluate(fromBson(BSON("a" << BSONObj())))));
+ }
+};
+
+/** Target field within an empty array. */
+class NestedBelowEmptyArray {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(BSON("" << BSONArray()),
+ toBson(expression->evaluate(fromBson(BSON("a" << BSONArray())))));
+ }
+};
+
+/** Target field within an array containing null. */
+class NestedBelowArrayWithNull {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{'':[]}"),
+ toBson(expression->evaluate(fromBson(fromjson("{a:[null]}")))));
+ }
+};
+
+/** Target field within an array containing undefined. */
+class NestedBelowArrayWithUndefined {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{'':[]}"),
+ toBson(expression->evaluate(fromBson(fromjson("{a:[undefined]}")))));
+ }
+};
+
+/** Target field within an array containing an integer. */
+class NestedBelowArrayWithInt {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{'':[]}"),
+ toBson(expression->evaluate(fromBson(fromjson("{a:[1]}")))));
+ }
+};
+
+/** Target field within an array. */
+class NestedWithinArray {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{'':[9]}"),
+ toBson(expression->evaluate(fromBson(fromjson("{a:[{b:9}]}")))));
+ }
+};
+
+/** Multiple value types within an array. */
+class MultipleArrayValues {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b");
+ assertBinaryEqual(fromjson("{'':[9,20]}"),
+ toBson(expression->evaluate(
+ fromBson(fromjson("{a:[{b:9},null,undefined,{g:4},{b:20},{}]}")))));
+ }
+};
+
+/** Expanding values within nested arrays. */
+class ExpandNestedArrays {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b.c");
+ assertBinaryEqual(fromjson("{'':[[1,2],3,[4],[[5]],[6,7]]}"),
+ toBson(expression->evaluate(fromBson(fromjson(
+ "{a:[{b:[{c:1},{c:2}]},"
+ "{b:{c:3}},"
+ "{b:[{c:4}]},"
+ "{b:[{c:[5]}]},"
+ "{b:{c:[6,7]}}]}")))));
+ }
+};
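+
+// Taken together, the nested array cases above illustrate the traversal rule a field
+// path applies to arrays: evaluating "a.b" maps the remaining path across the array
+// elements, collects the values found under object elements, and silently skips null,
+// undefined, missing, and non-object elements. Informally, per the test just above:
+//
+//   {a: [{b: 9}, null, undefined, {g: 4}, {b: 20}, {}]}  --"$a.b"-->  [9, 20]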
+
+/** Add to a BSONObj. */
+class AddToBsonObj {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b.c");
+ assertBinaryEqual(BSON("foo"
+ << "$a.b.c"),
+ BSON("foo" << expression->serialize(false)));
+ }
+};
+
+/** Add to a BSONArray. */
+class AddToBsonArray {
+public:
+ void run() {
+ intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b.c");
+ BSONArrayBuilder bab;
+ bab << expression->serialize(false);
+ assertBinaryEqual(BSON_ARRAY("$a.b.c"), bab.arr());
+ }
+};
+
+} // namespace FieldPath
+
+
+namespace Nary {
+
+/** A dummy child of ExpressionNary used for testing. */
+class Testable : public ExpressionNary {
+public:
+ virtual Value evaluateInternal(Variables* vars) const {
+ // Just put all the values in a list. This is not associative/commutative so
+ // the results will change if a factory is provided and operations are reordered.
+ vector<Value> values;
+ for (ExpressionVector::const_iterator i = vpOperand.begin(); i != vpOperand.end(); ++i) {
+ values.push_back((*i)->evaluateInternal(vars));
+ }
+ return Value(values);
+ }
+ virtual const char* getOpName() const {
+ return "$testable";
+ }
+ virtual bool isAssociativeAndCommutative() const {
+ return _isAssociativeAndCommutative;
+ }
+ static intrusive_ptr<Testable> create(bool associativeAndCommutative = false) {
+ return new Testable(associativeAndCommutative);
+ }
+ static intrusive_ptr<ExpressionNary> factory() {
+ return new Testable(true);
+ }
+ static intrusive_ptr<Testable> createFromOperands(const BSONArray& operands,
+ bool haveFactory = false) {
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Testable> testable = create(haveFactory);
+ BSONObjIterator i(operands);
+ while (i.more()) {
+ BSONElement element = i.next();
+ testable->addOperand(Expression::parseOperand(element, vps));
+ }
+ return testable;
+ }
+ void assertContents(const BSONArray& expectedContents) {
+ ASSERT_EQUALS(constify(BSON("$testable" << expectedContents)), expressionToBson(this));
+ }
- class ExpectedResultBase {
- public:
- virtual ~ExpectedResultBase() {}
- void run() {
- const Document spec = getSpec();
- const Value args = spec["input"];
- if (!spec["expected"].missing()) {
- FieldIterator fields(spec["expected"].getDocument());
- while (fields.more()) {
- const Document::FieldPair field(fields.next());
- const Value expected = field.second;
- const BSONObj obj = BSON(field.first << args);
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- const intrusive_ptr<Expression> expr =
- Expression::parseExpression(obj.firstElement(), vps);
- Value result = expr->evaluate(Document());
- if (result.getType() == Array) {
- result = sortSet(result);
- }
- if (result != expected) {
- string errMsg = str::stream()
- << "for expression " << field.first.toString()
- << " with argument " << args.toString()
- << " full tree: " << expr->serialize(false).toString()
- << " expected: " << expected.toString()
- << " but got: " << result.toString();
- FAIL(errMsg);
- }
- //TODO test optimize here
- }
- }
- if (!spec["error"].missing()) {
- const vector<Value>& asserters = spec["error"].getArray();
- size_t n = asserters.size();
- for (size_t i = 0; i < n; i++) {
- const BSONObj obj = BSON(asserters[i].getString() << args);
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- ASSERT_THROWS({
- // NOTE: parse and evaluatation failures are treated the same
- const intrusive_ptr<Expression> expr =
- Expression::parseExpression(obj.firstElement(), vps);
- expr->evaluate(Document());
- }, UserException);
- }
- }
- }
- private:
- virtual Document getSpec() = 0;
- };
-
- class Same : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2)
- << DOC_ARRAY(1 << 2) )
- << "expected" << DOC("$setIsSubset" << true
- << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>() )
- );
+private:
+ Testable(bool isAssociativeAndCommutative)
+ : _isAssociativeAndCommutative(isAssociativeAndCommutative) {}
+ bool _isAssociativeAndCommutative;
+};
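+
+// As a rough sketch of how Testable behaves: Testable::createFromOperands(
+// BSON_ARRAY(1 << "$a")) evaluates to the array [1, <value of "$a">] and serializes as
+// {$testable: [{$const: 1}, "$a"]}; constify() in assertContents() accounts for that
+// $const wrapping when comparing against plain BSON values.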
+
+/** Adding operands to the expression. */
+class AddOperand {
+public:
+ void run() {
+ intrusive_ptr<Testable> testable = Testable::create();
+ testable->addOperand(ExpressionConstant::create(Value(9)));
+ testable->assertContents(BSON_ARRAY(9));
+ testable->addOperand(ExpressionFieldPath::create("ab.c"));
+ testable->assertContents(BSON_ARRAY(9 << "$ab.c"));
+ }
+};
+
+/** Dependencies of the expression. */
+class Dependencies {
+public:
+ void run() {
+ intrusive_ptr<Testable> testable = Testable::create();
+
+ // No arguments.
+ assertDependencies(BSONArray(), testable);
+
+ // Add a constant argument.
+ testable->addOperand(ExpressionConstant::create(Value(1)));
+ assertDependencies(BSONArray(), testable);
+
+ // Add a field path argument.
+ testable->addOperand(ExpressionFieldPath::create("ab.c"));
+ assertDependencies(BSON_ARRAY("ab.c"), testable);
+
+ // Add an object expression.
+ BSONObj spec = BSON("" << BSON("a"
+ << "$x"
+ << "q"
+ << "$r"));
+ BSONElement specElement = spec.firstElement();
+ Expression::ObjectCtx ctx(Expression::ObjectCtx::DOCUMENT_OK);
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ testable->addOperand(Expression::parseObject(specElement.Obj(), &ctx, vps));
+ assertDependencies(BSON_ARRAY("ab.c"
+ << "r"
+ << "x"),
+ testable);
+ }
- }
- };
-
- class Redundant : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2)
- << DOC_ARRAY(1 << 2 << 2) )
- << "expected" << DOC("$setIsSubset" << true
- << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>() )
- );
+private:
+ void assertDependencies(const BSONArray& expectedDependencies,
+ const intrusive_ptr<Expression>& expression) {
+ DepsTracker dependencies;
+ expression->addDependencies(&dependencies);
+ BSONArrayBuilder dependenciesBson;
+ for (set<string>::const_iterator i = dependencies.fields.begin();
+ i != dependencies.fields.end();
+ ++i) {
+ dependenciesBson << *i;
+ }
+ ASSERT_EQUALS(expectedDependencies, dependenciesBson.arr());
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(false, dependencies.needTextScore);
+ }
+};
+
+/** Serialize to an object. */
+class AddToBsonObj {
+public:
+ void run() {
+ intrusive_ptr<Testable> testable = Testable::create();
+ testable->addOperand(ExpressionConstant::create(Value(5)));
+ ASSERT_EQUALS(BSON("foo" << BSON("$testable" << BSON_ARRAY(BSON("$const" << 5)))),
+ BSON("foo" << testable->serialize(false)));
+ }
+};
+
+/** Serialize to an array. */
+class AddToBsonArray {
+public:
+ void run() {
+ intrusive_ptr<Testable> testable = Testable::create();
+ testable->addOperand(ExpressionConstant::create(Value(5)));
+ ASSERT_EQUALS(constify(BSON_ARRAY(BSON("$testable" << BSON_ARRAY(5)))),
+ BSON_ARRAY(testable->serialize(false)));
+ }
+};
+
+/** One operand is optimized to a constant, while another is left as is. */
+class OptimizeOneOperand {
+public:
+ void run() {
+ BSONArray spec = BSON_ARRAY(BSON("$and" << BSONArray()) << "$abc");
+ intrusive_ptr<Testable> testable = Testable::createFromOperands(spec);
+ testable->assertContents(spec);
+ ASSERT(testable == testable->optimize());
+ testable->assertContents(BSON_ARRAY(true << "$abc"));
+ }
+};
+
+/** All operands are constants, and the operator is evaluated with them. */
+class EvaluateAllConstantOperands {
+public:
+ void run() {
+ BSONArray spec = BSON_ARRAY(1 << 2);
+ intrusive_ptr<Testable> testable = Testable::createFromOperands(spec);
+ testable->assertContents(spec);
+ intrusive_ptr<Expression> optimized = testable->optimize();
+ ASSERT(testable != optimized);
+ ASSERT_EQUALS(BSON("$const" << BSON_ARRAY(1 << 2)), expressionToBson(optimized));
+ }
+};
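+
+// The folding above works because optimize() notices that every operand is an
+// ExpressionConstant, evaluates the whole expression once, and returns a new
+// ExpressionConstant holding the result -- here the array [1, 2] that Testable's
+// evaluateInternal() builds.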
+
+class NoFactoryOptimizeBase {
+public:
+ virtual ~NoFactoryOptimizeBase() {}
+ void run() {
+ intrusive_ptr<Testable> testable = createTestable();
+ // Without factory optimization, optimization will not produce a new expression.
+ ASSERT(testable == testable->optimize());
+ }
- }
- };
-
- class DoubleRedundant : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 1 << 2)
- << DOC_ARRAY(1 << 2 << 2) )
- << "expected" << DOC("$setIsSubset" << true
- << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>() )
- );
+protected:
+ virtual intrusive_ptr<Testable> createTestable() = 0;
+};
+
+/** A string constant prevents factory optimization. */
+class StringConstant : public NoFactoryOptimizeBase {
+ intrusive_ptr<Testable> createTestable() {
+ return Testable::createFromOperands(BSON_ARRAY("abc"
+ << "def"
+ << "$path"),
+ true);
+ }
+};
- }
- };
-
- class Super : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2)
- << DOC_ARRAY(1) )
- << "expected" << DOC("$setIsSubset" << false
- << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(1)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << DOC_ARRAY(2) )
- );
+/** A single constant (rather than multiple constants) prevents factory optimization. SERVER-6192 */
+class SingleConstant : public NoFactoryOptimizeBase {
+ intrusive_ptr<Testable> createTestable() {
+ return Testable::createFromOperands(BSON_ARRAY(55 << "$path"), true);
+ }
+};
- }
- };
-
- class SuperWithRedundant : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2 << 2)
- << DOC_ARRAY(1) )
- << "expected" << DOC("$setIsSubset" << false
- << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(1)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << DOC_ARRAY(2) )
- );
+/** Factory optimization is not used without a factory. */
+class NoFactory : public NoFactoryOptimizeBase {
+ intrusive_ptr<Testable> createTestable() {
+ return Testable::createFromOperands(BSON_ARRAY(55 << 66 << "$path"), false);
+ }
+};
+
+/** Factory optimization separates constant from non constant expressions. */
+class FactoryOptimize {
+public:
+ void run() {
+ intrusive_ptr<Testable> testable =
+ Testable::createFromOperands(BSON_ARRAY(55 << 66 << "$path"), true);
+ intrusive_ptr<Expression> optimized = testable->optimize();
+ // The constant expressions are evaluated separately and placed at the end.
+ ASSERT_EQUALS(constify(BSON("$testable" << BSON_ARRAY("$path" << BSON_ARRAY(55 << 66)))),
+ expressionToBson(optimized));
+ }
+};
+
+/** Factory optimization flattens nested operators of the same type. */
+class FlattenOptimize {
+public:
+ void run() {
+ intrusive_ptr<Testable> testable = Testable::createFromOperands(
+ BSON_ARRAY(55 << "$path" <<
+ // $add has a factory, but it's a different factory from
+ // $testable.
+ BSON("$add" << BSON_ARRAY(5 << 6 << "$q")) << 66),
+ true);
+ // Add a nested $testable operand.
+ testable->addOperand(
+ Testable::createFromOperands(BSON_ARRAY(99 << 100 << "$another_path"), true));
+ intrusive_ptr<Expression> optimized = testable->optimize();
+ ASSERT_EQUALS(constify(BSON("$testable" << BSON_ARRAY( // non constant parts
+ "$path" << BSON("$add" << BSON_ARRAY("$q" << 11))
+ << "$another_path" <<
+ // constant part last
+ BSON_ARRAY(55 << 66 << BSON_ARRAY(99 << 100))))),
+ expressionToBson(optimized));
+ }
+};
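+
+// Informally, the rewrite asserted above is:
+//   {$testable: [55, "$path", {$add: [5, 6, "$q"]}, 66,
+//                {$testable: [99, 100, "$another_path"]}]}
+//     -- optimize() -->
+//   {$testable: ["$path", {$add: ["$q", 11]}, "$another_path", [55, 66, [99, 100]]]}
+// The nested $testable is absorbed because it shares the $testable factory, while $add
+// is only constant-folded internally (5 + 6 -> 11) since it is a different operator.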
+
+/** Three layers of factory optimization are flattened. */
+class FlattenThreeLayers {
+public:
+ void run() {
+ intrusive_ptr<Testable> top =
+ Testable::createFromOperands(BSON_ARRAY(1 << 2 << "$a"), true);
+ intrusive_ptr<Testable> nested =
+ Testable::createFromOperands(BSON_ARRAY(3 << 4 << "$b"), true);
+ nested->addOperand(Testable::createFromOperands(BSON_ARRAY(5 << 6 << "$c"), true));
+ top->addOperand(nested);
+ intrusive_ptr<Expression> optimized = top->optimize();
+ ASSERT_EQUALS(
+ constify(BSON("$testable" << BSON_ARRAY(
+ "$a"
+ << "$b"
+ << "$c"
+ << BSON_ARRAY(1 << 2 << BSON_ARRAY(3 << 4 << BSON_ARRAY(5 << 6)))))),
+ expressionToBson(optimized));
+ }
+};
+
+} // namespace Nary
+
+namespace Object {
+
+class Base {
+protected:
+ void assertDependencies(const BSONArray& expectedDependencies,
+ const intrusive_ptr<ExpressionObject>& expression,
+ bool includePath = true) const {
+ vector<string> path;
+ DepsTracker dependencies;
+ expression->addDependencies(&dependencies, includePath ? &path : 0);
+ BSONArrayBuilder bab;
+ for (set<string>::const_iterator i = dependencies.fields.begin();
+ i != dependencies.fields.end();
+ ++i) {
+ bab << *i;
+ }
+ ASSERT_EQUALS(expectedDependencies, bab.arr());
+ ASSERT_EQUALS(false, dependencies.needWholeDocument);
+ ASSERT_EQUALS(false, dependencies.needTextScore);
+ }
+};
+
+class ExpectedResultBase : public Base {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ _expression = ExpressionObject::createRoot();
+ prepareExpression();
+ Document document = fromBson(source());
+ MutableDocument result;
+ Variables vars(0, document);
+ expression()->addToDocument(result, document, &vars);
+ assertBinaryEqual(expected(), toBson(result.freeze()));
+ assertDependencies(expectedDependencies(), _expression);
+ ASSERT_EQUALS(expectedBsonRepresentation(), expressionToBson(_expression));
+ ASSERT_EQUALS(expectedIsSimple(), _expression->isSimple());
+ }
- }
- };
-
- class Sub : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1)
- << DOC_ARRAY(1 << 2) )
- << "expected" << DOC("$setIsSubset" << true
- << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(1)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>() )
- );
+protected:
+ intrusive_ptr<ExpressionObject> expression() {
+ return _expression;
+ }
+ virtual BSONObj source() {
+ return BSON("_id" << 0 << "a" << 1 << "b" << 2);
+ }
+ virtual void prepareExpression() = 0;
+ virtual BSONObj expected() = 0;
+ virtual BSONArray expectedDependencies() = 0;
+ virtual BSONObj expectedBsonRepresentation() = 0;
+ virtual bool expectedIsSimple() {
+ return true;
+ }
- }
- };
-
- class SameBackwards : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2)
- << DOC_ARRAY(2 << 1) )
- << "expected" << DOC("$setIsSubset" << true
- << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>() )
- );
+private:
+ intrusive_ptr<ExpressionObject> _expression;
+};
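+
+// Each subclass below supplies a source document via source(), builds its projection in
+// prepareExpression(), and declares the expected output, dependencies, serialized form,
+// and isSimple() value; run() in the base class verifies all of these in one pass.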
- }
- };
-
- class NoOverlap : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2)
- << DOC_ARRAY(8 << 4) )
- << "expected" << DOC("$setIsSubset" << false
- << "$setEquals" << false
- << "$setIntersection" << vector<Value>()
- << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference" << DOC_ARRAY(1 << 2))
- );
+/** Empty object spec. */
+class Empty : public ExpectedResultBase {
+public:
+ void prepareExpression() {}
+ BSONObj expected() {
+ return BSON("_id" << 0);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSONObj();
+ }
+};
- }
- };
-
- class Overlap : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2)
- << DOC_ARRAY(8 << 2 << 4) )
- << "expected" << DOC("$setIsSubset" << false
- << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(2)
- << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference" << DOC_ARRAY(1))
- );
+/** Include 'a' field only. */
+class Include : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("a");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << 1);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << true);
+ }
+};
- }
- };
-
- class LastNull : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2)
- << Value(BSONNULL) )
- << "expected" << DOC("$setIntersection" << BSONNULL
- << "$setUnion" << BSONNULL
- << "$setDifference" << BSONNULL )
- << "error" << DOC_ARRAY("$setEquals"
- << "$setIsSubset")
- );
+/** Cannot include missing 'a' field. */
+class MissingInclude : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0 << "b" << 2);
+ }
+ void prepareExpression() {
+ expression()->includePath("a");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << true);
+ }
+};
- }
- };
-
- class FirstNull : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( Value(BSONNULL)
- << DOC_ARRAY(1 << 2) )
- << "expected" << DOC("$setIntersection" << BSONNULL
- << "$setUnion" << BSONNULL
- << "$setDifference" << BSONNULL )
- << "error" << DOC_ARRAY("$setEquals"
- << "$setIsSubset")
- );
+/** Include '_id' field only. */
+class IncludeId : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("_id");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("_id" << true);
+ }
+};
+
+/** Exclude '_id' field. */
+class ExcludeId : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("b");
+ expression()->excludeId(true);
+ }
+ BSONObj expected() {
+ return BSON("b" << 2);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("b");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("_id" << false << "b" << true);
+ }
+};
+
+/** Result order based on source document field order, not inclusion spec field order. */
+class SourceOrder : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("b");
+ expression()->includePath("a");
+ }
+ BSONObj expected() {
+ return source();
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a"
+ << "b");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("b" << true << "a" << true);
+ }
+};
- }
- };
-
- class NoArg : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << vector<Value>()
- << "expected" << DOC("$setIntersection" << vector<Value>()
- << "$setUnion" << vector<Value>() )
- << "error" << DOC_ARRAY("$setEquals"
- << "$setIsSubset"
- << "$setDifference")
- );
+/** Include a nested field. */
+class IncludeNested : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("a.b");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 5));
+ }
+ BSONObj source() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 5 << "c" << 6) << "z" << 2);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a.b");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("b" << true));
+ }
+};
+
+/** Include two nested fields. */
+class IncludeTwoNested : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("a.b");
+ expression()->includePath("a.c");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 5 << "c" << 6));
+ }
+ BSONObj source() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 5 << "c" << 6) << "z" << 2);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a.b"
+ << "a.c");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("b" << true << "c" << true));
+ }
+};
+
+/** Include two fields nested within different parents. */
+class IncludeTwoParentNested : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("a.b");
+ expression()->includePath("c.d");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 5) << "c" << BSON("d" << 6));
+ }
+ BSONObj source() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 5) << "c" << BSON("d" << 6) << "z" << 2);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a.b"
+ << "c.d");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("b" << true) << "c" << BSON("d" << true));
+ }
+};
- }
- };
-
- class OneArg : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2) )
- << "expected" << DOC("$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2) )
- << "error" << DOC_ARRAY("$setEquals"
- << "$setIsSubset"
- << "$setDifference")
- );
+/** Attempt to include a missing nested field. */
+class IncludeMissingNested : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("a.b");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSONObj());
+ }
+ BSONObj source() {
+ return BSON("_id" << 0 << "a" << BSON("c" << 6) << "z" << 2);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a.b");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("b" << true));
+ }
+};
- }
- };
-
- class EmptyArg : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( vector<Value>() )
- << "expected" << DOC("$setIntersection" << vector<Value>()
- << "$setUnion" << vector<Value>() )
- << "error" << DOC_ARRAY("$setEquals"
- << "$setIsSubset"
- << "$setDifference")
- );
+/** Attempt to include a nested field within a non object. */
+class IncludeNestedWithinNonObject : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("a.b");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0);
+ }
+ BSONObj source() {
+ return BSON("_id" << 0 << "a" << 2 << "z" << 2);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a.b");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("b" << true));
+ }
+};
- }
- };
-
- class LeftArgEmpty : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( vector<Value>()
- << DOC_ARRAY(1 << 2) )
- << "expected" << DOC("$setIntersection" << vector<Value>()
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setIsSubset" << true
- << "$setEquals" << false
- << "$setDifference" << vector<Value>() )
- );
+/** Include a nested field within an array. */
+class IncludeArrayNested : public ExpectedResultBase {
+public:
+ void prepareExpression() {
+ expression()->includePath("a.b");
+ }
+ BSONObj expected() {
+ return fromjson("{_id:0,a:[{b:5},{b:2},{}]}");
+ }
+ BSONObj source() {
+ return fromjson("{_id:0,a:[{b:5,c:6},{b:2,c:9},{c:7},[],2],z:1}");
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a.b");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("b" << true));
+ }
+};
- }
- };
-
- class RightArgEmpty : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2)
- << vector<Value>() )
- << "expected" << DOC("$setIntersection" << vector<Value>()
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setIsSubset" << false
- << "$setEquals" << false
- << "$setDifference" << DOC_ARRAY(1 << 2) )
- );
+/** A non-root '_id' field is not implicitly included. */
+class ExcludeNonRootId : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0 << "a" << BSON("_id" << 1 << "b" << 1));
+ }
+ void prepareExpression() {
+ expression()->includePath("a.b");
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 1));
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a.b");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("b" << true));
+ }
+};
- }
- };
-
- class ManyArgs : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(8 << 3)
- << DOC_ARRAY("asdf" << "foo")
- << DOC_ARRAY(80.3 << 34)
- << vector<Value>()
- << DOC_ARRAY(80.3 << "foo" << 11 << "yay") )
- << "expected" << DOC("$setIntersection" << vector<Value>()
- << "$setEquals" << false
- << "$setUnion" << DOC_ARRAY(3
- << 8
- << 11
- << 34
- << 80.3
- << "asdf"
- << "foo"
- << "yay") )
- << "error" << DOC_ARRAY("$setIsSubset"
- << "$setDifference")
- );
+/** Project a computed expression. */
+class Computed : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0);
+ }
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5)));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << 5);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("$const" << 5));
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
- }
- };
-
- class ManyArgsEqual : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1 << 2 << 4)
- << DOC_ARRAY(1 << 2 << 2 << 4)
- << DOC_ARRAY(4 << 1 << 2)
- << DOC_ARRAY(2 << 1 << 1 << 4) )
- << "expected" << DOC("$setIntersection" << DOC_ARRAY(1 << 2 << 4)
- << "$setEquals" << true
- << "$setUnion" << DOC_ARRAY(1 << 2 << 4) )
- << "error" << DOC_ARRAY("$setIsSubset"
- << "$setDifference")
- );
+/** Project a computed expression replacing an existing field. */
+class ComputedReplacement : public Computed {
+ virtual BSONObj source() {
+ return BSON("_id" << 0 << "a" << 99);
+ }
+};
- }
- };
- } // namespace Set
+/** A computed undefined value is passed through. */
+class ComputedUndefined : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0);
+ }
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a"),
+ ExpressionConstant::create(Value(BSONUndefined)));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSONUndefined);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return fromjson("{a:{$const:undefined}}");
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
- namespace Strcasecmp {
+/** Project a computed expression replacing an existing field with Undefined. */
+class ComputedUndefinedReplacement : public ComputedUndefined {
+ virtual BSONObj source() {
+ return BSON("_id" << 0 << "a" << 99);
+ }
+};
- class ExpectedResultBase {
- public:
- virtual ~ExpectedResultBase() {
- }
- void run() {
- assertResult( expectedResult(), spec() );
- assertResult( -expectedResult(), reverseSpec() );
- }
- protected:
- virtual string a() = 0;
- virtual string b() = 0;
- virtual int expectedResult() = 0;
- private:
- BSONObj spec() { return BSON( "$strcasecmp" << BSON_ARRAY( a() << b() ) ); }
- BSONObj reverseSpec() { return BSON( "$strcasecmp" << BSON_ARRAY( b() << a() ) ); }
- void assertResult( int expectedResult, const BSONObj& spec ) {
- BSONObj specObj = BSON( "" << spec );
- BSONElement specElement = specObj.firstElement();
+/** A null value is projected. */
+class ComputedNull : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0);
+ }
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(BSONNULL)));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSONNULL);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("$const" << BSONNULL));
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
+
+/** A nested value is projected. */
+class ComputedNested : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0);
+ }
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a.b"), ExpressionConstant::create(Value(5)));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 5));
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("b" << BSON("$const" << 5)));
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
+
+/** A field path is projected. */
+class ComputedFieldPath : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0 << "x" << 4);
+ }
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a"), ExpressionFieldPath::create("x"));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << 4);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "x");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a"
+ << "$x");
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
+
+/** A nested field path is projected. */
+class ComputedNestedFieldPath : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0 << "x" << BSON("y" << 4));
+ }
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a.b"), ExpressionFieldPath::create("x.y"));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 4));
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "x.y");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return BSON("a" << BSON("b"
+ << "$x.y"));
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
+
+/** An empty subobject expression for a missing field is not projected. */
+class EmptyNewSubobject : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0);
+ }
+ void prepareExpression() {
+ // Create a sub expression returning an empty object.
+ intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
+ subExpression->addField(mongo::FieldPath("b"), ExpressionFieldPath::create("a.b"));
+ expression()->addField(mongo::FieldPath("a"), subExpression);
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0);
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id"
+ << "a.b");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return fromjson("{a:{b:'$a.b'}}");
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
+
+/** A non empty subobject expression for a missing field is projected. */
+class NonEmptyNewSubobject : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0);
+ }
+ void prepareExpression() {
+ // Create a sub expression returning a non empty object.
+ intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
+ subExpression->addField(mongo::FieldPath("b"), ExpressionConstant::create(Value(6)));
+ expression()->addField(mongo::FieldPath("a"), subExpression);
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 6));
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return fromjson("{a:{b:{$const:6}}}");
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
+
+/** Two computed fields within a common parent. */
+class AdjacentDottedComputedFields : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0);
+ }
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a.b"), ExpressionConstant::create(Value(6)));
+ expression()->addField(mongo::FieldPath("a.c"), ExpressionConstant::create(Value(7)));
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 6 << "c" << 7));
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return fromjson("{a:{b:{$const:6},c:{$const:7}}}");
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
+
+/** Two computed fields within a common parent, in one case dotted. */
+class AdjacentDottedAndNestedComputedFields : public AdjacentDottedComputedFields {
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a.b"), ExpressionConstant::create(Value(6)));
+ intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
+ subExpression->addField(mongo::FieldPath("c"), ExpressionConstant::create(Value(7)));
+ expression()->addField(mongo::FieldPath("a"), subExpression);
+ }
+};
+
+/** Two computed fields within a common parent, in another case dotted. */
+class AdjacentNestedAndDottedComputedFields : public AdjacentDottedComputedFields {
+ void prepareExpression() {
+ intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
+ subExpression->addField(mongo::FieldPath("b"), ExpressionConstant::create(Value(6)));
+ expression()->addField(mongo::FieldPath("a"), subExpression);
+ expression()->addField(mongo::FieldPath("a.c"), ExpressionConstant::create(Value(7)));
+ }
+};
+
+/** Two computed fields within a common parent, nested rather than dotted. */
+class AdjacentNestedComputedFields : public AdjacentDottedComputedFields {
+ void prepareExpression() {
+ intrusive_ptr<ExpressionObject> firstSubExpression = ExpressionObject::create();
+ firstSubExpression->addField(mongo::FieldPath("b"), ExpressionConstant::create(Value(6)));
+ expression()->addField(mongo::FieldPath("a"), firstSubExpression);
+ intrusive_ptr<ExpressionObject> secondSubExpression = ExpressionObject::create();
+ secondSubExpression->addField(mongo::FieldPath("c"), ExpressionConstant::create(Value(7)));
+ expression()->addField(mongo::FieldPath("a"), secondSubExpression);
+ }
+};
+
+/** Field ordering is preserved when nested fields are merged. */
+class AdjacentNestedOrdering : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0);
+ }
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a.b"), ExpressionConstant::create(Value(6)));
+ intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
+ // Add field 'd' then 'c'. Expect the same field ordering in the result doc.
+ subExpression->addField(mongo::FieldPath("d"), ExpressionConstant::create(Value(7)));
+ subExpression->addField(mongo::FieldPath("c"), ExpressionConstant::create(Value(8)));
+ expression()->addField(mongo::FieldPath("a"), subExpression);
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << 6 << "d" << 7 << "c" << 8));
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return fromjson("{a:{b:{$const:6},d:{$const:7},c:{$const:8}}}");
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
+
+/** Adjacent fields two levels deep. */
+class MultipleNestedFields : public ExpectedResultBase {
+public:
+ virtual BSONObj source() {
+ return BSON("_id" << 0);
+ }
+ void prepareExpression() {
+ expression()->addField(mongo::FieldPath("a.b.c"), ExpressionConstant::create(Value(6)));
+ intrusive_ptr<ExpressionObject> bSubExpression = ExpressionObject::create();
+ bSubExpression->addField(mongo::FieldPath("d"), ExpressionConstant::create(Value(7)));
+ intrusive_ptr<ExpressionObject> aSubExpression = ExpressionObject::create();
+ aSubExpression->addField(mongo::FieldPath("b"), bSubExpression);
+ expression()->addField(mongo::FieldPath("a"), aSubExpression);
+ }
+ BSONObj expected() {
+ return BSON("_id" << 0 << "a" << BSON("b" << BSON("c" << 6 << "d" << 7)));
+ }
+ BSONArray expectedDependencies() {
+ return BSON_ARRAY("_id");
+ }
+ BSONObj expectedBsonRepresentation() {
+ return fromjson("{a:{b:{c:{$const:6},d:{$const:7}}}}");
+ }
+ bool expectedIsSimple() {
+ return false;
+ }
+};
+
+/** Two expressions cannot generate the same field. */
+class ConflictingExpressionFields : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5)));
+ ASSERT_THROWS(expression->addField(mongo::FieldPath("a"), // Duplicate field.
+ ExpressionConstant::create(Value(6))),
+ UserException);
+ }
+};
+
+/** An expression field conflicts with an inclusion field. */
+class ConflictingInclusionExpressionFields : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->includePath("a");
+ ASSERT_THROWS(
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(6))),
+ UserException);
+ }
+};
+
+/** An inclusion field conflicts with an expression field. */
+class ConflictingExpressionInclusionFields : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5)));
+ ASSERT_THROWS(expression->includePath("a"), UserException);
+ }
+};
+
+/** An object expression conflicts with a constant expression. */
+class ConflictingObjectConstantExpressionFields : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
+ subExpression->includePath("b");
+ expression->addField(mongo::FieldPath("a"), subExpression);
+ ASSERT_THROWS(
+ expression->addField(mongo::FieldPath("a.b"), ExpressionConstant::create(Value(6))),
+ UserException);
+ }
+};
+
+/** A constant expression conflicts with an object expression. */
+class ConflictingConstantObjectExpressionFields : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a.b"), ExpressionConstant::create(Value(6)));
+ intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
+ subExpression->includePath("b");
+ ASSERT_THROWS(expression->addField(mongo::FieldPath("a"), subExpression), UserException);
+ }
+};
+
+/** Two nested expressions cannot generate the same field. */
+class ConflictingNestedFields : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a.b"), ExpressionConstant::create(Value(5)));
+ ASSERT_THROWS(expression->addField(mongo::FieldPath("a.b"), // Duplicate field.
+ ExpressionConstant::create(Value(6))),
+ UserException);
+ }
+};
+
+/** An expression cannot be created for a subfield of another expression. */
+class ConflictingFieldAndSubfield : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5)));
+ ASSERT_THROWS(
+ expression->addField(mongo::FieldPath("a.b"), ExpressionConstant::create(Value(5))),
+ UserException);
+ }
+};
+
+/** An expression cannot be created for a nested field of another expression. */
+class ConflictingFieldAndNestedField : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5)));
+ intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
+ subExpression->addField(mongo::FieldPath("b"), ExpressionConstant::create(Value(5)));
+ ASSERT_THROWS(expression->addField(mongo::FieldPath("a"), subExpression), UserException);
+ }
+};
+
+/** An expression cannot be created for a parent field of another expression. */
+class ConflictingSubfieldAndField : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a.b"), ExpressionConstant::create(Value(5)));
+ ASSERT_THROWS(
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5))),
+ UserException);
+ }
+};
+
+/** An expression cannot be created for a parent of a nested field. */
+class ConflictingNestedFieldAndField : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ intrusive_ptr<ExpressionObject> subExpression = ExpressionObject::create();
+ subExpression->addField(mongo::FieldPath("b"), ExpressionConstant::create(Value(5)));
+ expression->addField(mongo::FieldPath("a"), subExpression);
+ ASSERT_THROWS(
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5))),
+ UserException);
+ }
+};
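+
+// The conflict cases above exercise one invariant: within an ExpressionObject, each
+// field path may be bound at most once, and a new binding may be neither a prefix nor
+// an extension of an existing one ('a' vs. 'a.b', in either order, throws a
+// UserException).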
+
+/** Dependencies for non inclusion expressions. */
+class NonInclusionDependencies : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5)));
+ assertDependencies(BSON_ARRAY("_id"), expression, true);
+ assertDependencies(BSONArray(), expression, false);
+ expression->addField(mongo::FieldPath("b"), ExpressionFieldPath::create("c.d"));
+ assertDependencies(BSON_ARRAY("_id"
+ << "c.d"),
+ expression,
+ true);
+ assertDependencies(BSON_ARRAY("c.d"), expression, false);
+ }
+};
+
+/** Dependencies for inclusion expressions. */
+class InclusionDependencies : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->includePath("a");
+ assertDependencies(BSON_ARRAY("_id"
+ << "a"),
+ expression,
+ true);
+ DepsTracker unused;
+ // 'path' must be provided for inclusion expressions.
+ ASSERT_THROWS(expression->addDependencies(&unused), UserException);
+ }
+};
+
+/** Optimizing an object expression optimizes its sub expressions. */
+class Optimize : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ // Add inclusion.
+ expression->includePath("a");
+ // Add non inclusion.
+ intrusive_ptr<Expression> andExpr = new ExpressionAnd();
+ expression->addField(mongo::FieldPath("b"), andExpr);
+ expression->optimize();
+ // Optimizing 'expression' optimizes its non inclusion sub expressions, while
+ // inclusion sub expressions are passed through.
+ ASSERT_EQUALS(BSON("a" << true << "b" << BSON("$const" << true)),
+ expressionToBson(expression));
+ }
+};
+
+/** Serialize to a BSONObj. */
+class AddToBsonObj : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5)));
+ ASSERT_EQUALS(constify(BSON("foo" << BSON("a" << 5))),
+ BSON("foo" << expression->serialize(false)));
+ }
+};
+
+/** Serialize to a BSONObj, with constants represented by expressions. */
+class AddToBsonObjRequireExpression : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5)));
+ ASSERT_EQUALS(BSON("foo" << BSON("a" << BSON("$const" << 5))),
+ BSON("foo" << expression->serialize(false)));
+ }
+};
+
+/** Serialize to a BSONArray. */
+class AddToBsonArray : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->addField(mongo::FieldPath("a"), ExpressionConstant::create(Value(5)));
+ BSONArrayBuilder bab;
+ bab << expression->serialize(false);
+ ASSERT_EQUALS(constify(BSON_ARRAY(BSON("a" << 5))), bab.arr());
+ }
+};
+
+/**
+ * evaluate() does not supply an inclusion document, so fields named in an inclusion
+ * spec are not included. (Inclusion specs are generally neither expected nor allowed
+ * where evaluate() is called instead of addToDocument().)
+ */
+class Evaluate : public Base {
+public:
+ void run() {
+ intrusive_ptr<ExpressionObject> expression = ExpressionObject::createRoot();
+ expression->includePath("a");
+ expression->addField(mongo::FieldPath("b"), ExpressionConstant::create(Value(5)));
+ expression->addField(mongo::FieldPath("c"), ExpressionFieldPath::create("a"));
+ ASSERT_EQUALS(
+ BSON("b" << 5 << "c" << 1),
+ toBson(expression->evaluate(fromBson(BSON("_id" << 0 << "a" << 1))).getDocument()));
+ }
+};
+
+} // namespace Object
+
+namespace Or {
+
+class ExpectedResultBase {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ BSONObj specObject = BSON("" << spec());
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ ASSERT_EQUALS(BSON("" << expectedResult()),
+ toBson(expression->evaluate(fromBson(BSON("a" << 1)))));
+ intrusive_ptr<Expression> optimized = expression->optimize();
+ ASSERT_EQUALS(BSON("" << expectedResult()),
+ toBson(optimized->evaluate(fromBson(BSON("a" << 1)))));
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+ virtual bool expectedResult() = 0;
+};
+
+class OptimizeBase {
+public:
+ virtual ~OptimizeBase() {}
+ void run() {
+ BSONObj specObject = BSON("" << spec());
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ intrusive_ptr<Expression> optimized = expression->optimize();
+ ASSERT_EQUALS(expectedOptimized(), expressionToBson(optimized));
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+ virtual BSONObj expectedOptimized() = 0;
+};
+
+class NoOptimizeBase : public OptimizeBase {
+ BSONObj expectedOptimized() {
+ return constify(spec());
+ }
+};
+
+/** $or without operands. */
+class NoOperands : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSONArray());
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $or passed 'true'. */
+class True : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(true));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** $or passed 'false'. */
+class False : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(false));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $or passed 'true', 'true'. */
+class TrueTrue : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(true << true));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** $or passed 'true', 'false'. */
+class TrueFalse : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(true << false));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** $or passed 'false', 'true'. */
+class FalseTrue : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(false << true));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** $or passed 'false', 'false'. */
+class FalseFalse : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(false << false));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $or passed 'false', 'false', 'false'. */
+class FalseFalseFalse : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(false << false << false));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $or passed 'false', 'false', 'true'. */
+class FalseFalseTrue : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(false << false << true));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** $or passed '0', '1'. */
+class ZeroOne : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(0 << 1));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** $or passed '0', 'false'. */
+class ZeroFalse : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(0 << false));
+ }
+ bool expectedResult() {
+ return false;
+ }
+};
+
+/** $or passed a field path. */
+class FieldPath : public ExpectedResultBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY("$a"));
+ }
+ bool expectedResult() {
+ return true;
+ }
+};
+
+/** A constant expression is optimized to a constant. */
+class OptimizeConstantExpression : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(1));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << true);
+ }
+};
+
+/** A non constant expression is not optimized. */
+class NonConstant : public NoOptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY("$a"));
+ }
+};
+
+/** An expression beginning with a single truthy constant is optimized to a constant. */
+class ConstantNonConstantTrue : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(1 << "$a"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << true);
+ }
+};
+
+/** An expression beginning with a single falsy constant is optimized by dropping the constant. */
+class ConstantNonConstantFalse : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(0 << "$a"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$and" << BSON_ARRAY("$a"));
+ }
+ // note: using $and as serialization of ExpressionCoerceToBool rather than ExpressionAnd
+};
+
+/** An expression with a field path and '1'. */
+class NonConstantOne : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY("$a" << 1));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << true);
+ }
+};
+
+/** An expression with a field path and '0'. */
+class NonConstantZero : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY("$a" << 0));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$and" << BSON_ARRAY("$a"));
+ }
+};
+
+/** An expression with two field paths and '1'. */
+class NonConstantNonConstantOne : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY("$a"
+ << "$b" << 1));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << true);
+ }
+};
+
+/** An expression with two field paths and '0'. */
+class NonConstantNonConstantZero : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY("$a"
+ << "$b" << 0));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$or" << BSON_ARRAY("$a"
+ << "$b"));
+ }
+};
+
+/** An expression with '0', '1', and a field path. */
+class ZeroOneNonConstant : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(0 << 1 << "$a"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << true);
+ }
+};
+
+/** An expression with '0', '0', and a field path. */
+class ZeroZeroNonConstant : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(0 << 0 << "$a"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$and" << BSON_ARRAY("$a"));
+ }
+};
+
+/** Nested $or expressions. */
+class Nested : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(0 << BSON("$or" << BSON_ARRAY(0)) << "$a"
+ << "$b"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$or" << BSON_ARRAY("$a"
+ << "$b"));
+ }
+};
+
+/** Nested $or expressions containing a nested value evaluating to true. */
+class NestedOne : public OptimizeBase {
+ BSONObj spec() {
+ return BSON("$or" << BSON_ARRAY(0 << BSON("$or" << BSON_ARRAY(BSON("$or" << BSON_ARRAY(1))))
+ << "$a"
+ << "$b"));
+ }
+ BSONObj expectedOptimized() {
+ return BSON("$const" << true);
+ }
+};
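+
+// Summarizing the $or optimizations exercised above: constant operands are folded, any
+// truthy constant collapses the whole expression to {$const: true}, falsy constants are
+// simply dropped, nested $or operands are flattened in, and a single surviving operand
+// is coerced to bool (which serializes via $and, per the note above).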
+
+} // namespace Or
+
+namespace Parse {
+
+namespace Object {
+
+class Base {
+public:
+ virtual ~Base() {}
+ void run() {
+ BSONObj specObject = BSON("" << spec());
+ BSONElement specElement = specObject.firstElement();
+ Expression::ObjectCtx context = objectCtx();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression =
+ Expression::parseObject(specElement.Obj(), &context, vps);
+ ASSERT_EQUALS(expectedBson(), expressionToBson(expression));
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+ virtual Expression::ObjectCtx objectCtx() {
+ return Expression::ObjectCtx(Expression::ObjectCtx::DOCUMENT_OK);
+ }
+ virtual BSONObj expectedBson() {
+ return constify(spec());
+ }
+};
+
+class ParseError {
+public:
+ virtual ~ParseError() {}
+ void run() {
+ BSONObj specObject = BSON("" << spec());
+ BSONElement specElement = specObject.firstElement();
+ Expression::ObjectCtx context = objectCtx();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ ASSERT_THROWS(Expression::parseObject(specElement.Obj(), &context, vps), UserException);
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+ virtual Expression::ObjectCtx objectCtx() {
+ return Expression::ObjectCtx(Expression::ObjectCtx::DOCUMENT_OK);
+ }
+};
+
+/** The spec must be an object. */
+class NonObject {
+public:
+ void run() {
+ BSONObj specObject = BSON("" << 1);
+ BSONElement specElement = specObject.firstElement();
+ Expression::ObjectCtx context = Expression::ObjectCtx(Expression::ObjectCtx::DOCUMENT_OK);
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ ASSERT_THROWS(Expression::parseObject(specElement.Obj(), &context, vps), UserException);
+ }
+};
+
+/** Empty object. */
+class Empty : public Base {
+ BSONObj spec() {
+ return BSONObj();
+ }
+};
+
+/** Operator spec object. */
+class Operator : public Base {
+ BSONObj spec() {
+ return BSON("$and" << BSONArray());
+ }
+};
+
+/** Invalid operator not allowed. */
+class InvalidOperator : public ParseError {
+ BSONObj spec() {
+ return BSON("$invalid" << 1);
+ }
+};
+
+/** Two operators not allowed. */
+class TwoOperators : public ParseError {
+ BSONObj spec() {
+ return BSON("$and" << BSONArray() << "$or" << BSONArray());
+ }
+};
+
+/** An operator must be the first and only field. */
+class OperatorLaterField : public ParseError {
+ BSONObj spec() {
+ return BSON("a" << BSON("$and" << BSONArray()) << "$or" << BSONArray());
+ }
+};
+
+/** An operator must be the first and only field. */
+class OperatorAndOtherField : public ParseError {
+ BSONObj spec() {
+ return BSON("$and" << BSONArray() << "a" << BSON("$or" << BSONArray()));
+ }
+};
+
+/** Operators not allowed at the top level of a projection. */
+class OperatorTopLevel : public ParseError {
+ BSONObj spec() {
+ return BSON("$and" << BSONArray());
+ }
+ Expression::ObjectCtx objectCtx() {
+ return Expression::ObjectCtx(Expression::ObjectCtx::DOCUMENT_OK |
+ Expression::ObjectCtx::TOP_LEVEL);
+ }
+};
+
+/** Dotted fields are not generally allowed. */
+class Dotted : public ParseError {
+ BSONObj spec() {
+ return BSON("a.b" << BSON("$and" << BSONArray()));
+ }
+};
+
+/** Dotted fields are allowed at the top level. */
+class DottedTopLevel : public Base {
+ BSONObj spec() {
+ return BSON("a.b" << BSON("$and" << BSONArray()));
+ }
+ Expression::ObjectCtx objectCtx() {
+ return Expression::ObjectCtx(Expression::ObjectCtx::DOCUMENT_OK |
+ Expression::ObjectCtx::TOP_LEVEL);
+ }
+ BSONObj expectedBson() {
+ return BSON("a" << BSON("b" << BSON("$and" << BSONArray())));
+ }
+};
+
+/** Nested spec. */
+class Nested : public Base {
+ BSONObj spec() {
+ return BSON("a" << BSON("$and" << BSONArray()));
+ }
+};
+
+/** Parse error in nested document. */
+class NestedParseError : public ParseError {
+ BSONObj spec() {
+ return BSON("a" << BSON("$and" << BSONArray() << "$or" << BSONArray()));
+ }
+};
+
+/** FieldPath expression. */
+class FieldPath : public Base {
+ BSONObj spec() {
+ return BSON("a"
+ << "$field");
+ }
+};
+
+/** Invalid FieldPath expression. */
+class InvalidFieldPath : public ParseError {
+ BSONObj spec() {
+ return BSON("a"
+ << "$field.");
+ }
+};
+
+/** Non-FieldPath string. */
+class NonFieldPathString : public ParseError {
+ BSONObj spec() {
+ return BSON("a"
+ << "foo");
+ }
+};
+
+/** Inclusion spec not allowed. */
+class DisallowedInclusion : public ParseError {
+ BSONObj spec() {
+ return BSON("a" << 1);
+ }
+};
+
+class InclusionBase : public Base {
+ Expression::ObjectCtx objectCtx() {
+ return Expression::ObjectCtx(Expression::ObjectCtx::DOCUMENT_OK |
+ Expression::ObjectCtx::INCLUSION_OK);
+ }
+ BSONObj expectedBson() {
+ return BSON("a" << true);
+ }
+};
+
+/** Inclusion with bool type. */
+class InclusionBool : public InclusionBase {
+ BSONObj spec() {
+ return BSON("a" << true);
+ }
+};
+
+/** Inclusion with double type. */
+class InclusionDouble : public InclusionBase {
+ BSONObj spec() {
+ return BSON("a" << 1.0);
+ }
+};
+
+/** Inclusion with int type. */
+class InclusionInt : public InclusionBase {
+ BSONObj spec() {
+ return BSON("a" << 1);
+ }
+};
+
+/** Inclusion with long type. */
+class InclusionLong : public InclusionBase {
+ BSONObj spec() {
+ return BSON("a" << 1LL);
+ }
+};
+
+/** Inclusion of a nested field. */
+class NestedInclusion : public InclusionBase {
+ BSONObj spec() {
+ return BSON("a" << BSON("b" << true));
+ }
+ BSONObj expectedBson() {
+ return spec();
+ }
+};
+
+/** Exclude _id. */
+class ExcludeId : public Base {
+ BSONObj spec() {
+ return BSON("_id" << 0);
+ }
+ Expression::ObjectCtx objectCtx() {
+ return Expression::ObjectCtx(Expression::ObjectCtx::DOCUMENT_OK |
+ Expression::ObjectCtx::TOP_LEVEL);
+ }
+ BSONObj expectedBson() {
+ return BSON("_id" << false);
+ }
+};
+
+/** Excluding non _id field not allowed. */
+class ExcludeNonId : public ParseError {
+ BSONObj spec() {
+ return BSON("a" << 0);
+ }
+};
+
+/** Excluding _id not top level. */
+class ExcludeIdNotTopLevel : public ParseError {
+ BSONObj spec() {
+ return BSON("_id" << 0);
+ }
+};
+
+/** Invalid value type. */
+class InvalidType : public ParseError {
+ BSONObj spec() {
+ return BSON("a" << BSONNULL);
+ }
+};
+
+} // namespace Object
+
+namespace Expression {
+
+using mongo::Expression;
+
+class Base {
+public:
+ virtual ~Base() {}
+ void run() {
+ BSONObj specObject = spec();
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseExpression(specElement, vps);
+ ASSERT_EQUALS(constify(expectedBson()), expressionToBson(expression));
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+ virtual BSONObj expectedBson() {
+ return constify(spec());
+ }
+};
+
+class ParseError {
+public:
+ virtual ~ParseError() {}
+ void run() {
+ BSONObj specObject = spec();
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ ASSERT_THROWS(Expression::parseExpression(specElement, vps), UserException);
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+};
+
+/** A constant expression. */
+class Const : public Base {
+ BSONObj spec() {
+ return BSON("$const" << 5);
+ }
+};
+
+/** An expression with an invalid name. */
+class InvalidName : public ParseError {
+ BSONObj spec() {
+ return BSON("$invalid" << 1);
+ }
+};
+
+/** An expression that requires an array argument but is not given one. */
+class RequiredArrayMissing : public ParseError {
+ BSONObj spec() {
+ return BSON("$strcasecmp"
+ << "foo");
+ }
+};
+
+/** An expression with the wrong number of operands. */
+class IncorrectOperandCount : public ParseError {
+ BSONObj spec() {
+ return BSON("$strcasecmp" << BSON_ARRAY("foo"));
+ }
+};
+
+/** An expression with the correct number of operands. */
+class CorrectOperandCount : public Base {
+ BSONObj spec() {
+ return BSON("$strcasecmp" << BSON_ARRAY("foo"
+ << "FOO"));
+ }
+};
+
+/** A variable argument expression with zero operands. */
+class ZeroOperands : public Base {
+ BSONObj spec() {
+ return BSON("$and" << BSONArray());
+ }
+};
+
+/** A variable argument expression with one operand. */
+class OneOperand : public Base {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(1));
+ }
+};
+
+/** A variable argument expression with two operands. */
+class TwoOperands : public Base {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(1 << 2));
+ }
+};
+
+/** A variable argument expression with a singleton operand. */
+class SingletonOperandVariable : public Base {
+ BSONObj spec() {
+ return BSON("$and" << 1);
+ }
+ BSONObj expectedBson() {
+ return BSON("$and" << BSON_ARRAY(1));
+ }
+};
+
+/** A fixed argument expression with a singleton operand. */
+class SingletonOperandFixed : public Base {
+ BSONObj spec() {
+ return BSON("$not" << 1);
+ }
+ BSONObj expectedBson() {
+ return BSON("$not" << BSON_ARRAY(1));
+ }
+};
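+
+// As the two singleton cases above show, parseExpression() wraps a lone
+// operand in an array during parsing: {$and: 1} and {$not: 1} parse as if
+// written {$and: [1]} and {$not: [1]} (illustrative restatement, not an
+// additional test case).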
+
+/** An object can be provided as a singleton argument. */
+class ObjectSingleton : public Base {
+ BSONObj spec() {
+ return BSON("$and" << BSON("$const" << 1));
+ }
+ BSONObj expectedBson() {
+ return BSON("$and" << BSON_ARRAY(BSON("$const" << 1)));
+ }
+};
+
+/** An object can be provided as an array argument. */
+class ObjectOperand : public Base {
+ BSONObj spec() {
+ return BSON("$and" << BSON_ARRAY(BSON("$const" << 1)));
+ }
+ BSONObj expectedBson() {
+ return BSON("$and" << BSON_ARRAY(1));
+ }
+};
+
+} // namespace Expression
+
+namespace Operand {
+
+class Base {
+public:
+ virtual ~Base() {}
+ void run() {
+ BSONObj specObject = spec();
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<mongo::Expression> expression =
+ mongo::Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(expectedBson(), expressionToBson(expression));
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+ virtual BSONObj expectedBson() {
+ return constify(spec());
+ }
+};
+
+class ParseError {
+public:
+ virtual ~ParseError() {}
+ void run() {
+ BSONObj specObject = spec();
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ ASSERT_THROWS(mongo::Expression::parseOperand(specElement, vps), UserException);
+ }
+
+protected:
+ virtual BSONObj spec() = 0;
+};
+
+/** A field path operand. */
+class FieldPath {
+public:
+ void run() {
+ BSONObj specObject = BSON(""
+ << "$field");
+ BSONElement specElement = specObject.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<mongo::Expression> expression =
+ mongo::Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(specObject, BSON("" << expression->serialize(false)));
+ }
+};
+
+/** A string constant (not field path) operand. */
+class NonFieldPathString : public Base {
+ BSONObj spec() {
+ return BSON(""
+ << "foo");
+ }
+ BSONObj expectedBson() {
+ return BSON("$const"
+ << "foo");
+ }
+};
+
+/** An object operand. */
+class Object : public Base {
+ BSONObj spec() {
+ return BSON("" << BSON("$and" << BSONArray()));
+ }
+ BSONObj expectedBson() {
+ return BSON("$and" << BSONArray());
+ }
+};
+
+/** An inclusion operand. */
+class InclusionObject : public ParseError {
+ BSONObj spec() {
+ return BSON("" << BSON("a" << 1));
+ }
+};
+
+/** A constant operand. */
+class Constant : public Base {
+ BSONObj spec() {
+ return BSON("" << 5);
+ }
+ BSONObj expectedBson() {
+ return BSON("$const" << 5);
+ }
+};
+
+} // namespace Operand
+
+} // namespace Parse
+
+namespace Set {
+Value sortSet(Value set) {
+ if (set.nullish()) {
+ return Value(BSONNULL);
+ }
+ vector<Value> sortedSet = set.getArray();
+ std::sort(sortedSet.begin(), sortedSet.end());
+ return Value(sortedSet);
+}
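+
+// Note: $setIntersection, $setUnion, and $setDifference make no guarantee
+// about the ordering of their result arrays, so the harness below runs
+// sortSet() on any array result before comparing it against the expected
+// value. Illustrative example (not itself a test case):
+//   sortSet(Value([2, 1, 1])) -> [1, 1, 2]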
+
+class ExpectedResultBase {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ const Document spec = getSpec();
+ const Value args = spec["input"];
+ if (!spec["expected"].missing()) {
+ FieldIterator fields(spec["expected"].getDocument());
+ while (fields.more()) {
+ const Document::FieldPair field(fields.next());
+ const Value expected = field.second;
+ const BSONObj obj = BSON(field.first << args);
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS( constify( spec ), expressionToBson( expression ) );
- ASSERT_EQUALS( BSON( "" << expectedResult ),
- toBson( expression->evaluate( Document() ) ) );
- }
- };
-
- class NullBegin : public ExpectedResultBase {
- string a() { return string( "\0ab", 3 ); }
- string b() { return string( "\0AB", 3 ); }
- int expectedResult() { return 0; }
- };
-
- class NullEnd : public ExpectedResultBase {
- string a() { return string( "ab\0", 3 ); }
- string b() { return string( "aB\0", 3 ); }
- int expectedResult() { return 0; }
- };
-
- class NullMiddleLt : public ExpectedResultBase {
- string a() { return string( "a\0a", 3 ); }
- string b() { return string( "a\0B", 3 ); }
- int expectedResult() { return -1; }
- };
-
- class NullMiddleEq : public ExpectedResultBase {
- string a() { return string( "a\0b", 3 ); }
- string b() { return string( "a\0B", 3 ); }
- int expectedResult() { return 0; }
- };
-
- class NullMiddleGt : public ExpectedResultBase {
- string a() { return string( "a\0c", 3 ); }
- string b() { return string( "a\0B", 3 ); }
- int expectedResult() { return 1; }
- };
-
- } // namespace Strcasecmp
-
- namespace Substr {
-
- class ExpectedResultBase {
- public:
- virtual ~ExpectedResultBase() {
+ const intrusive_ptr<Expression> expr =
+ Expression::parseExpression(obj.firstElement(), vps);
+ Value result = expr->evaluate(Document());
+ if (result.getType() == Array) {
+ result = sortSet(result);
+ }
+ if (result != expected) {
+ string errMsg = str::stream()
+ << "for expression " << field.first.toString() << " with argument "
+ << args.toString() << " full tree: " << expr->serialize(false).toString()
+ << " expected: " << expected.toString()
+ << " but got: " << result.toString();
+ FAIL(errMsg);
+ }
+ // TODO test optimize here
}
- void run() {
- BSONObj specObj = BSON( "" << spec() );
- BSONElement specElement = specObj.firstElement();
+ }
+ if (!spec["error"].missing()) {
+ const vector<Value>& asserters = spec["error"].getArray();
+ size_t n = asserters.size();
+ for (size_t i = 0; i < n; i++) {
+ const BSONObj obj = BSON(asserters[i].getString() << args);
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS( constify( spec() ), expressionToBson( expression ) );
- ASSERT_EQUALS( BSON( "" << expectedResult() ),
- toBson( expression->evaluate( Document() ) ) );
- }
- protected:
- virtual string str() = 0;
- virtual int offset() = 0;
- virtual int length() = 0;
- virtual string expectedResult() = 0;
- private:
- BSONObj spec() {
- return BSON( "$substr" << BSON_ARRAY( str() << offset() << length() ) );
+ ASSERT_THROWS({
+                    // NOTE: parse and evaluation failures are treated the same
+ const intrusive_ptr<Expression> expr =
+ Expression::parseExpression(obj.firstElement(), vps);
+ expr->evaluate(Document());
+ }, UserException);
}
- };
-
- /** Retrieve a full string containing a null character. */
- class FullNull : public ExpectedResultBase {
- string str() { return string( "a\0b", 3 ); }
- int offset() { return 0; }
- int length() { return 3; }
- string expectedResult() { return str(); }
- };
-
- /** Retrieve a substring beginning with a null character. */
- class BeginAtNull : public ExpectedResultBase {
- string str() { return string( "a\0b", 3 ); }
- int offset() { return 1; }
- int length() { return 2; }
- string expectedResult() { return string( "\0b", 2 ); }
- };
-
- /** Retrieve a substring ending with a null character. */
- class EndAtNull : public ExpectedResultBase {
- string str() { return string( "a\0b", 3 ); }
- int offset() { return 0; }
- int length() { return 2; }
- string expectedResult() { return string( "a\0", 2 ); }
- };
-
- /** Drop a beginning null character. */
- class DropBeginningNull : public ExpectedResultBase {
- string str() { return string( "\0b", 2 ); }
- int offset() { return 1; }
- int length() { return 1; }
- string expectedResult() { return "b"; }
- };
-
- /** Drop an ending null character. */
- class DropEndingNull : public ExpectedResultBase {
- string str() { return string( "a\0", 2 ); }
- int offset() { return 0; }
- int length() { return 1; }
- string expectedResult() { return "a"; }
- };
-
- } // namespace Substr
-
- namespace ToLower {
-
- class ExpectedResultBase {
- public:
- virtual ~ExpectedResultBase() {
- }
- void run() {
- BSONObj specObj = BSON( "" << spec() );
- BSONElement specElement = specObj.firstElement();
+ }
+ }
+
+private:
+ virtual Document getSpec() = 0;
+};
+
+class Same : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2)) << "expected"
+ << DOC("$setIsSubset" << true << "$setEquals" << true
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
+ }
+};
+
+class Redundant : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
+ << DOC("$setIsSubset" << true << "$setEquals" << true
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
+ }
+};
+
+class DoubleRedundant : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC(
+ "input" << DOC_ARRAY(DOC_ARRAY(1 << 1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
+ << DOC("$setIsSubset" << true << "$setEquals" << true << "$setIntersection"
+ << DOC_ARRAY(1 << 2) << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
+ }
+};
+
+class Super : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1)) << "expected"
+ << DOC("$setIsSubset" << false << "$setEquals" << false
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << DOC_ARRAY(2)));
+ }
+};
+
+class SuperWithRedundant : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 2) << DOC_ARRAY(1)) << "expected"
+ << DOC("$setIsSubset" << false << "$setEquals" << false
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << DOC_ARRAY(2)));
+ }
+};
+
+class Sub : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1) << DOC_ARRAY(1 << 2)) << "expected"
+ << DOC("$setIsSubset" << true << "$setEquals" << false
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
+ }
+};
+
+class SameBackwards : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(2 << 1)) << "expected"
+ << DOC("$setIsSubset" << true << "$setEquals" << true
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
+ }
+};
+
+class NoOverlap : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 4)) << "expected"
+ << DOC("$setIsSubset" << false << "$setEquals" << false
+ << "$setIntersection" << vector<Value>()
+ << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference" << DOC_ARRAY(1 << 2)));
+ }
+};
+
+class Overlap : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 2 << 4)) << "expected"
+ << DOC("$setIsSubset" << false << "$setEquals" << false
+ << "$setIntersection" << DOC_ARRAY(2)
+ << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference" << DOC_ARRAY(1)));
+ }
+};
+
+class LastNull : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << Value(BSONNULL)) << "expected"
+ << DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
+ << "$setDifference" << BSONNULL) << "error"
+ << DOC_ARRAY("$setEquals"
+ << "$setIsSubset"));
+ }
+};
+
+class FirstNull : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(Value(BSONNULL) << DOC_ARRAY(1 << 2)) << "expected"
+ << DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
+ << "$setDifference" << BSONNULL) << "error"
+ << DOC_ARRAY("$setEquals"
+ << "$setIsSubset"));
+ }
+};
+
+class NoArg : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC(
+ "input" << vector<Value>() << "expected"
+ << DOC("$setIntersection" << vector<Value>() << "$setUnion" << vector<Value>())
+ << "error" << DOC_ARRAY("$setEquals"
+ << "$setIsSubset"
+ << "$setDifference"));
+ }
+};
+
+class OneArg : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2)) << "expected"
+ << DOC("$setIntersection" << DOC_ARRAY(1 << 2) << "$setUnion"
+ << DOC_ARRAY(1 << 2)) << "error"
+ << DOC_ARRAY("$setEquals"
+ << "$setIsSubset"
+ << "$setDifference"));
+ }
+};
+
+class EmptyArg : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC(
+ "input" << DOC_ARRAY(vector<Value>()) << "expected"
+ << DOC("$setIntersection" << vector<Value>() << "$setUnion" << vector<Value>())
+ << "error" << DOC_ARRAY("$setEquals"
+ << "$setIsSubset"
+ << "$setDifference"));
+ }
+};
+
+class LeftArgEmpty : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(vector<Value>() << DOC_ARRAY(1 << 2)) << "expected"
+ << DOC("$setIntersection" << vector<Value>() << "$setUnion"
+ << DOC_ARRAY(1 << 2) << "$setIsSubset" << true
+ << "$setEquals" << false << "$setDifference"
+ << vector<Value>()));
+ }
+};
+
+class RightArgEmpty : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << vector<Value>()) << "expected"
+ << DOC("$setIntersection" << vector<Value>() << "$setUnion"
+ << DOC_ARRAY(1 << 2) << "$setIsSubset" << false
+ << "$setEquals" << false << "$setDifference"
+ << DOC_ARRAY(1 << 2)));
+ }
+};
+
+class ManyArgs : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC(
+ "input" << DOC_ARRAY(DOC_ARRAY(8 << 3)
+ << DOC_ARRAY("asdf"
+ << "foo") << DOC_ARRAY(80.3 << 34) << vector<Value>()
+ << DOC_ARRAY(80.3 << "foo" << 11 << "yay")) << "expected"
+ << DOC("$setIntersection"
+ << vector<Value>() << "$setEquals" << false << "$setUnion"
+ << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"
+ << "foo"
+ << "yay")) << "error" << DOC_ARRAY("$setIsSubset"
+ << "$setDifference"));
+ }
+};
+
+class ManyArgsEqual : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4)
+ << DOC_ARRAY(1 << 2 << 2 << 4) << DOC_ARRAY(4 << 1 << 2)
+ << DOC_ARRAY(2 << 1 << 1 << 4)) << "expected"
+ << DOC("$setIntersection" << DOC_ARRAY(1 << 2 << 4) << "$setEquals"
+ << true << "$setUnion"
+ << DOC_ARRAY(1 << 2 << 4)) << "error"
+ << DOC_ARRAY("$setIsSubset"
+ << "$setDifference"));
+ }
+};
+} // namespace Set
+
+namespace Strcasecmp {
+
+class ExpectedResultBase {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ assertResult(expectedResult(), spec());
+ assertResult(-expectedResult(), reverseSpec());
+ }
+
+protected:
+ virtual string a() = 0;
+ virtual string b() = 0;
+ virtual int expectedResult() = 0;
+
+private:
+ BSONObj spec() {
+ return BSON("$strcasecmp" << BSON_ARRAY(a() << b()));
+ }
+ BSONObj reverseSpec() {
+ return BSON("$strcasecmp" << BSON_ARRAY(b() << a()));
+ }
+ void assertResult(int expectedResult, const BSONObj& spec) {
+ BSONObj specObj = BSON("" << spec);
+ BSONElement specElement = specObj.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(constify(spec), expressionToBson(expression));
+ ASSERT_EQUALS(BSON("" << expectedResult), toBson(expression->evaluate(Document())));
+ }
+};
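+
+// For reference, $strcasecmp is a case-insensitive three-way comparison
+// returning -1, 0, or 1; e.g. {$strcasecmp: ["AbC", "abd"]} -> -1 and
+// {$strcasecmp: ["AbC", "abc"]} -> 0 (illustrative, not test cases). The
+// cases below check that embedded null bytes are not treated as string
+// terminators.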
+
+class NullBegin : public ExpectedResultBase {
+ string a() {
+ return string("\0ab", 3);
+ }
+ string b() {
+ return string("\0AB", 3);
+ }
+ int expectedResult() {
+ return 0;
+ }
+};
+
+class NullEnd : public ExpectedResultBase {
+ string a() {
+ return string("ab\0", 3);
+ }
+ string b() {
+ return string("aB\0", 3);
+ }
+ int expectedResult() {
+ return 0;
+ }
+};
+
+class NullMiddleLt : public ExpectedResultBase {
+ string a() {
+ return string("a\0a", 3);
+ }
+ string b() {
+ return string("a\0B", 3);
+ }
+ int expectedResult() {
+ return -1;
+ }
+};
+
+class NullMiddleEq : public ExpectedResultBase {
+ string a() {
+ return string("a\0b", 3);
+ }
+ string b() {
+ return string("a\0B", 3);
+ }
+ int expectedResult() {
+ return 0;
+ }
+};
+
+class NullMiddleGt : public ExpectedResultBase {
+ string a() {
+ return string("a\0c", 3);
+ }
+ string b() {
+ return string("a\0B", 3);
+ }
+ int expectedResult() {
+ return 1;
+ }
+};
+
+} // namespace Strcasecmp
+
+namespace Substr {
+
+class ExpectedResultBase {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ BSONObj specObj = BSON("" << spec());
+ BSONElement specElement = specObj.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ ASSERT_EQUALS(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
+ }
+
+protected:
+ virtual string str() = 0;
+ virtual int offset() = 0;
+ virtual int length() = 0;
+ virtual string expectedResult() = 0;
+
+private:
+ BSONObj spec() {
+ return BSON("$substr" << BSON_ARRAY(str() << offset() << length()));
+ }
+};
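+
+// For reference, $substr takes [string, offset, length] and extracts a
+// byte-level substring; e.g. {$substr: ["hello", 1, 3]} -> "ell"
+// (illustrative, not a test case). The cases below check that embedded
+// null bytes are copied rather than treated as terminators.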
+
+/** Retrieve a full string containing a null character. */
+class FullNull : public ExpectedResultBase {
+ string str() {
+ return string("a\0b", 3);
+ }
+ int offset() {
+ return 0;
+ }
+ int length() {
+ return 3;
+ }
+ string expectedResult() {
+ return str();
+ }
+};
+
+/** Retrieve a substring beginning with a null character. */
+class BeginAtNull : public ExpectedResultBase {
+ string str() {
+ return string("a\0b", 3);
+ }
+ int offset() {
+ return 1;
+ }
+ int length() {
+ return 2;
+ }
+ string expectedResult() {
+ return string("\0b", 2);
+ }
+};
+
+/** Retrieve a substring ending with a null character. */
+class EndAtNull : public ExpectedResultBase {
+ string str() {
+ return string("a\0b", 3);
+ }
+ int offset() {
+ return 0;
+ }
+ int length() {
+ return 2;
+ }
+ string expectedResult() {
+ return string("a\0", 2);
+ }
+};
+
+/** Drop a beginning null character. */
+class DropBeginningNull : public ExpectedResultBase {
+ string str() {
+ return string("\0b", 2);
+ }
+ int offset() {
+ return 1;
+ }
+ int length() {
+ return 1;
+ }
+ string expectedResult() {
+ return "b";
+ }
+};
+
+/** Drop an ending null character. */
+class DropEndingNull : public ExpectedResultBase {
+ string str() {
+ return string("a\0", 2);
+ }
+ int offset() {
+ return 0;
+ }
+ int length() {
+ return 1;
+ }
+ string expectedResult() {
+ return "a";
+ }
+};
+
+} // namespace Substr
+
+namespace ToLower {
+
+class ExpectedResultBase {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ BSONObj specObj = BSON("" << spec());
+ BSONElement specElement = specObj.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ ASSERT_EQUALS(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
+ }
+
+protected:
+ virtual string str() = 0;
+ virtual string expectedResult() = 0;
+
+private:
+ BSONObj spec() {
+ return BSON("$toLower" << BSON_ARRAY(str()));
+ }
+};
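+
+// For reference: {$toLower: ["AbC"]} -> "abc" (illustrative, not a test
+// case). The cases below check that embedded null bytes pass through the
+// case conversion unchanged.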
+
+/** String beginning with a null character. */
+class NullBegin : public ExpectedResultBase {
+ string str() {
+ return string("\0aB", 3);
+ }
+ string expectedResult() {
+ return string("\0ab", 3);
+ }
+};
+
+/** String containing a null character. */
+class NullMiddle : public ExpectedResultBase {
+ string str() {
+ return string("a\0B", 3);
+ }
+ string expectedResult() {
+ return string("a\0b", 3);
+ }
+};
+
+/** String ending with a null character. */
+class NullEnd : public ExpectedResultBase {
+ string str() {
+ return string("aB\0", 3);
+ }
+ string expectedResult() {
+ return string("ab\0", 3);
+ }
+};
+
+} // namespace ToLower
+
+namespace ToUpper {
+
+class ExpectedResultBase {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ BSONObj specObj = BSON("" << spec());
+ BSONElement specElement = specObj.firstElement();
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
+ ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ ASSERT_EQUALS(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
+ }
+
+protected:
+ virtual string str() = 0;
+ virtual string expectedResult() = 0;
+
+private:
+ BSONObj spec() {
+ return BSON("$toUpper" << BSON_ARRAY(str()));
+ }
+};
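+
+// For reference: {$toUpper: ["aBc"]} -> "ABC" (illustrative, not a test
+// case); the null-byte cases below mirror the ToLower cases above.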
+
+/** String beginning with a null character. */
+class NullBegin : public ExpectedResultBase {
+ string str() {
+ return string("\0aB", 3);
+ }
+ string expectedResult() {
+ return string("\0AB", 3);
+ }
+};
+
+/** String containing a null character. */
+class NullMiddle : public ExpectedResultBase {
+ string str() {
+ return string("a\0B", 3);
+ }
+ string expectedResult() {
+ return string("A\0B", 3);
+ }
+};
+
+/** String ending with a null character. */
+class NullEnd : public ExpectedResultBase {
+ string str() {
+ return string("aB\0", 3);
+ }
+ string expectedResult() {
+ return string("AB\0", 3);
+ }
+};
+
+} // namespace ToUpper
+
+namespace AllAnyElements {
+class ExpectedResultBase {
+public:
+ virtual ~ExpectedResultBase() {}
+ void run() {
+ const Document spec = getSpec();
+ const Value args = spec["input"];
+ if (!spec["expected"].missing()) {
+ FieldIterator fields(spec["expected"].getDocument());
+ while (fields.more()) {
+ const Document::FieldPair field(fields.next());
+ const Value expected = field.second;
+ const BSONObj obj = BSON(field.first << args);
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS( constify( spec() ), expressionToBson( expression ) );
- ASSERT_EQUALS( BSON( "" << expectedResult() ),
- toBson( expression->evaluate( Document() ) ) );
- }
- protected:
- virtual string str() = 0;
- virtual string expectedResult() = 0;
- private:
- BSONObj spec() {
- return BSON( "$toLower" << BSON_ARRAY( str() ) );
- }
- };
-
- /** String beginning with a null character. */
- class NullBegin : public ExpectedResultBase {
- string str() { return string( "\0aB", 3 ); }
- string expectedResult() { return string( "\0ab", 3 ); }
- };
-
- /** String containing a null character. */
- class NullMiddle : public ExpectedResultBase {
- string str() { return string( "a\0B", 3 ); }
- string expectedResult() { return string( "a\0b", 3 ); }
- };
-
- /** String ending with a null character. */
- class NullEnd : public ExpectedResultBase {
- string str() { return string( "aB\0", 3 ); }
- string expectedResult() { return string( "ab\0", 3 ); }
- };
-
- } // namespace ToLower
-
- namespace ToUpper {
-
- class ExpectedResultBase {
- public:
- virtual ~ExpectedResultBase() {
+ const intrusive_ptr<Expression> expr =
+ Expression::parseExpression(obj.firstElement(), vps);
+ const Value result = expr->evaluate(Document());
+ if (result != expected) {
+ string errMsg = str::stream()
+ << "for expression " << field.first.toString() << " with argument "
+ << args.toString() << " full tree: " << expr->serialize(false).toString()
+ << " expected: " << expected.toString()
+ << " but got: " << result.toString();
+ FAIL(errMsg);
+ }
+ // TODO test optimize here
}
- void run() {
- BSONObj specObj = BSON( "" << spec() );
- BSONElement specElement = specObj.firstElement();
+ }
+ if (!spec["error"].missing()) {
+ const vector<Value>& asserters = spec["error"].getArray();
+ size_t n = asserters.size();
+ for (size_t i = 0; i < n; i++) {
+ const BSONObj obj = BSON(asserters[i].getString() << args);
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS( constify( spec() ), expressionToBson( expression ) );
- ASSERT_EQUALS( BSON( "" << expectedResult() ),
- toBson( expression->evaluate( Document() ) ) );
- }
- protected:
- virtual string str() = 0;
- virtual string expectedResult() = 0;
- private:
- BSONObj spec() {
- return BSON( "$toUpper" << BSON_ARRAY( str() ) );
- }
- };
-
- /** String beginning with a null character. */
- class NullBegin : public ExpectedResultBase {
- string str() { return string( "\0aB", 3 ); }
- string expectedResult() { return string( "\0AB", 3 ); }
- };
-
- /** String containing a null character. */
- class NullMiddle : public ExpectedResultBase {
- string str() { return string( "a\0B", 3 ); }
- string expectedResult() { return string( "A\0B", 3 ); }
- };
-
- /** String ending with a null character. */
- class NullEnd : public ExpectedResultBase {
- string str() { return string( "aB\0", 3 ); }
- string expectedResult() { return string( "AB\0", 3 ); }
- };
-
- } // namespace ToUpper
-
- namespace AllAnyElements {
- class ExpectedResultBase {
- public:
- virtual ~ExpectedResultBase() {}
- void run() {
- const Document spec = getSpec();
- const Value args = spec["input"];
- if (!spec["expected"].missing()) {
- FieldIterator fields(spec["expected"].getDocument());
- while (fields.more()) {
- const Document::FieldPair field(fields.next());
- const Value expected = field.second;
- const BSONObj obj = BSON(field.first << args);
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- const intrusive_ptr<Expression> expr =
- Expression::parseExpression(obj.firstElement(), vps);
- const Value result = expr->evaluate(Document());
- if (result != expected) {
- string errMsg = str::stream()
- << "for expression " << field.first.toString()
- << " with argument " << args.toString()
- << " full tree: " << expr->serialize(false).toString()
- << " expected: " << expected.toString()
- << " but got: " << result.toString();
- FAIL(errMsg);
- }
- //TODO test optimize here
- }
- }
- if (!spec["error"].missing()) {
- const vector<Value>& asserters = spec["error"].getArray();
- size_t n = asserters.size();
- for (size_t i = 0; i < n; i++) {
- const BSONObj obj = BSON(asserters[i].getString() << args);
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- ASSERT_THROWS({
- // NOTE: parse and evaluatation failures are treated the same
- const intrusive_ptr<Expression> expr =
- Expression::parseExpression(obj.firstElement(), vps);
- expr->evaluate(Document());
- }, UserException);
- }
- }
- }
- private:
- virtual Document getSpec() = 0;
- };
-
- class JustFalse : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(false) )
- << "expected" << DOC("$allElementsTrue" << false
- << "$anyElementTrue" << false) );
+ ASSERT_THROWS({
+                    // NOTE: parse and evaluation failures are treated the same
+ const intrusive_ptr<Expression> expr =
+ Expression::parseExpression(obj.firstElement(), vps);
+ expr->evaluate(Document());
+ }, UserException);
}
- };
+ }
+ }
- class JustTrue : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(true) )
- << "expected" << DOC("$allElementsTrue" << true
- << "$anyElementTrue" << true) );
- }
- };
+private:
+ virtual Document getSpec() = 0;
+};
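+
+// For reference, $allElementsTrue and $anyElementTrue each take a single
+// array and apply BSON truthiness to its elements (0 is false, nonzero
+// numbers are true); e.g. {$anyElementTrue: [[0, 1]]} -> true
+// (illustrative, not a test case).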
- class OneTrueOneFalse : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(true << false) )
- << "expected" << DOC("$allElementsTrue" << false
- << "$anyElementTrue" << true) );
- }
- };
+class JustFalse : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(false)) << "expected"
+ << DOC("$allElementsTrue" << false << "$anyElementTrue" << false));
+ }
+};
- class Empty : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( vector<Value>() )
- << "expected" << DOC("$allElementsTrue" << true
- << "$anyElementTrue" << false) );
- }
- };
+class JustTrue : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(true)) << "expected"
+ << DOC("$allElementsTrue" << true << "$anyElementTrue" << true));
+ }
+};
- class TrueViaInt : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(1) )
- << "expected" << DOC("$allElementsTrue" << true
- << "$anyElementTrue" << true) );
- }
- };
+class OneTrueOneFalse : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(true << false)) << "expected"
+ << DOC("$allElementsTrue" << false << "$anyElementTrue" << true));
+ }
+};
- class FalseViaInt : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY( DOC_ARRAY(0) )
- << "expected" << DOC("$allElementsTrue" << false
- << "$anyElementTrue" << false) );
- }
- };
+class Empty : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(vector<Value>()) << "expected"
+ << DOC("$allElementsTrue" << true << "$anyElementTrue" << false));
+ }
+};
- class Null : public ExpectedResultBase {
- Document getSpec() {
- return DOC("input" << DOC_ARRAY(BSONNULL)
- << "error" << DOC_ARRAY("$allElementsTrue"
- << "$anyElementTrue") );
- }
- };
+class TrueViaInt : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1)) << "expected"
+ << DOC("$allElementsTrue" << true << "$anyElementTrue" << true));
+ }
+};
- } // namespace AllAnyElements
+class FalseViaInt : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(0)) << "expected"
+ << DOC("$allElementsTrue" << false << "$anyElementTrue" << false));
+ }
+};
- class All : public Suite {
- public:
- All() : Suite( "expression" ) {
- }
- void setupTests() {
- add<Add::NullDocument>();
- add<Add::NoOperands>();
- add<Add::Date>();
- add<Add::String>();
- add<Add::Bool>();
- add<Add::Int>();
- add<Add::Long>();
- add<Add::Double>();
- add<Add::Null>();
- add<Add::Undefined>();
- add<Add::IntInt>();
- add<Add::IntIntNoOverflow>();
- add<Add::IntLong>();
- add<Add::IntLongOverflow>();
- add<Add::IntDouble>();
- add<Add::IntDate>();
- add<Add::LongDouble>();
- add<Add::LongDoubleNoOverflow>();
- add<Add::IntNull>();
- add<Add::LongUndefined>();
-
- add<And::NoOperands>();
- add<And::True>();
- add<And::False>();
- add<And::TrueTrue>();
- add<And::TrueFalse>();
- add<And::FalseTrue>();
- add<And::FalseFalse>();
- add<And::TrueTrueTrue>();
- add<And::TrueTrueFalse>();
- add<And::TrueTrueFalse>();
- add<And::ZeroOne>();
- add<And::OneTwo>();
- add<And::FieldPath>();
- add<And::OptimizeConstantExpression>();
- add<And::NonConstant>();
- add<And::ConstantNonConstantTrue>();
- add<And::ConstantNonConstantFalse>();
- add<And::NonConstantOne>();
- add<And::NonConstantZero>();
- add<And::NonConstantNonConstantOne>();
- add<And::NonConstantNonConstantZero>();
- add<And::ZeroOneNonConstant>();
- add<And::OneOneNonConstant>();
- add<And::Nested>();
- add<And::NestedZero>();
-
- add<CoerceToBool::EvaluateTrue>();
- add<CoerceToBool::EvaluateFalse>();
- add<CoerceToBool::Dependencies>();
- add<CoerceToBool::AddToBsonObj>();
- add<CoerceToBool::AddToBsonArray>();
-
- add<Compare::EqLt>();
- add<Compare::EqEq>();
- add<Compare::EqGt>();
- add<Compare::NeLt>();
- add<Compare::NeEq>();
- add<Compare::NeGt>();
- add<Compare::GtLt>();
- add<Compare::GtEq>();
- add<Compare::GtGt>();
- add<Compare::GteLt>();
- add<Compare::GteEq>();
- add<Compare::GteGt>();
- add<Compare::LtLt>();
- add<Compare::LtEq>();
- add<Compare::LtGt>();
- add<Compare::LteLt>();
- add<Compare::LteEq>();
- add<Compare::LteGt>();
- add<Compare::CmpLt>();
- add<Compare::CmpEq>();
- add<Compare::CmpGt>();
- add<Compare::CmpBracketed>();
- add<Compare::ZeroOperands>();
- add<Compare::OneOperand>();
- add<Compare::ThreeOperands>();
- add<Compare::IncompatibleTypes>();
- add<Compare::OptimizeConstants>();
- add<Compare::NoOptimizeCmp>();
- add<Compare::NoOptimizeNe>();
- add<Compare::NoOptimizeNoConstant>();
- add<Compare::NoOptimizeWithoutFieldPath>();
- add<Compare::NoOptimizeWithoutFieldPathReverse>();
- add<Compare::OptimizeEq>();
- add<Compare::OptimizeEqReverse>();
- add<Compare::OptimizeLt>();
- add<Compare::OptimizeLtReverse>();
- add<Compare::OptimizeLte>();
- add<Compare::OptimizeLteReverse>();
- add<Compare::OptimizeGt>();
- add<Compare::OptimizeGtReverse>();
- add<Compare::OptimizeGte>();
- add<Compare::OptimizeGteReverse>();
-
- add<Constant::Create>();
- add<Constant::CreateFromBsonElement>();
- add<Constant::Optimize>();
- add<Constant::Dependencies>();
- add<Constant::AddToBsonObj>();
- add<Constant::AddToBsonArray>();
-
- add<FieldPath::Invalid>();
- add<FieldPath::Optimize>();
- add<FieldPath::Dependencies>();
- add<FieldPath::Missing>();
- add<FieldPath::Present>();
- add<FieldPath::NestedBelowNull>();
- add<FieldPath::NestedBelowUndefined>();
- add<FieldPath::NestedBelowMissing>();
- add<FieldPath::NestedBelowInt>();
- add<FieldPath::NestedValue>();
- add<FieldPath::NestedBelowEmptyObject>();
- add<FieldPath::NestedBelowEmptyArray>();
- add<FieldPath::NestedBelowEmptyArray>();
- add<FieldPath::NestedBelowArrayWithNull>();
- add<FieldPath::NestedBelowArrayWithUndefined>();
- add<FieldPath::NestedBelowArrayWithInt>();
- add<FieldPath::NestedWithinArray>();
- add<FieldPath::MultipleArrayValues>();
- add<FieldPath::ExpandNestedArrays>();
- add<FieldPath::AddToBsonObj>();
- add<FieldPath::AddToBsonArray>();
-
- add<Nary::AddOperand>();
- add<Nary::Dependencies>();
- add<Nary::AddToBsonObj>();
- add<Nary::AddToBsonArray>();
- add<Nary::OptimizeOneOperand>();
- add<Nary::EvaluateAllConstantOperands>();
- add<Nary::StringConstant>();
- add<Nary::SingleConstant>();
- add<Nary::NoFactory>();
- add<Nary::FactoryOptimize>();
- add<Nary::FlattenOptimize>();
- add<Nary::FlattenThreeLayers>();
-
- add<Object::Empty>();
- add<Object::Include>();
- add<Object::MissingInclude>();
- add<Object::IncludeId>();
- add<Object::ExcludeId>();
- add<Object::SourceOrder>();
- add<Object::IncludeNested>();
- add<Object::IncludeTwoNested>();
- add<Object::IncludeTwoParentNested>();
- add<Object::IncludeMissingNested>();
- add<Object::IncludeNestedWithinNonObject>();
- add<Object::IncludeArrayNested>();
- add<Object::ExcludeNonRootId>();
- add<Object::Computed>();
- add<Object::ComputedReplacement>();
- add<Object::ComputedUndefined>();
- add<Object::ComputedUndefinedReplacement>();
- add<Object::ComputedNull>();
- add<Object::ComputedNested>();
- add<Object::ComputedFieldPath>();
- add<Object::ComputedNestedFieldPath>();
- add<Object::EmptyNewSubobject>();
- add<Object::NonEmptyNewSubobject>();
- add<Object::AdjacentNestedComputedFields>();
- add<Object::AdjacentDottedAndNestedComputedFields>();
- add<Object::AdjacentNestedAndDottedComputedFields>();
- add<Object::AdjacentDottedComputedFields>();
- add<Object::AdjacentNestedOrdering>();
- add<Object::MultipleNestedFields>();
- add<Object::ConflictingExpressionFields>();
- add<Object::ConflictingInclusionExpressionFields>();
- add<Object::ConflictingExpressionInclusionFields>();
- add<Object::ConflictingObjectConstantExpressionFields>();
- add<Object::ConflictingConstantObjectExpressionFields>();
- add<Object::ConflictingNestedFields>();
- add<Object::ConflictingFieldAndSubfield>();
- add<Object::ConflictingFieldAndNestedField>();
- add<Object::ConflictingSubfieldAndField>();
- add<Object::ConflictingNestedFieldAndField>();
- add<Object::NonInclusionDependencies>();
- add<Object::InclusionDependencies>();
- add<Object::Optimize>();
- add<Object::AddToBsonObj>();
- add<Object::AddToBsonObjRequireExpression>();
- add<Object::AddToBsonArray>();
- add<Object::Evaluate>();
-
- add<Or::NoOperands>();
- add<Or::True>();
- add<Or::False>();
- add<Or::TrueTrue>();
- add<Or::TrueFalse>();
- add<Or::FalseTrue>();
- add<Or::FalseFalse>();
- add<Or::FalseFalseFalse>();
- add<Or::FalseFalseTrue>();
- add<Or::ZeroOne>();
- add<Or::ZeroFalse>();
- add<Or::FieldPath>();
- add<Or::OptimizeConstantExpression>();
- add<Or::NonConstant>();
- add<Or::ConstantNonConstantTrue>();
- add<Or::ConstantNonConstantFalse>();
- add<Or::NonConstantOne>();
- add<Or::NonConstantZero>();
- add<Or::NonConstantNonConstantOne>();
- add<Or::NonConstantNonConstantZero>();
- add<Or::ZeroOneNonConstant>();
- add<Or::ZeroZeroNonConstant>();
- add<Or::Nested>();
- add<Or::NestedOne>();
-
- add<Parse::Object::NonObject>();
- add<Parse::Object::Empty>();
- add<Parse::Object::Operator>();
- add<Parse::Object::InvalidOperator>();
- add<Parse::Object::TwoOperators>();
- add<Parse::Object::OperatorLaterField>();
- add<Parse::Object::OperatorAndOtherField>();
- add<Parse::Object::OperatorTopLevel>();
- add<Parse::Object::Dotted>();
- add<Parse::Object::DottedTopLevel>();
- add<Parse::Object::Nested>();
- add<Parse::Object::NestedParseError>();
- add<Parse::Object::FieldPath>();
- add<Parse::Object::InvalidFieldPath>();
- add<Parse::Object::NonFieldPathString>();
- add<Parse::Object::DisallowedInclusion>();
- add<Parse::Object::InclusionBool>();
- add<Parse::Object::InclusionDouble>();
- add<Parse::Object::InclusionInt>();
- add<Parse::Object::InclusionLong>();
- add<Parse::Object::NestedInclusion>();
- add<Parse::Object::ExcludeId>();
- add<Parse::Object::ExcludeNonId>();
- add<Parse::Object::ExcludeIdNotTopLevel>();
- add<Parse::Object::InvalidType>();
- add<Parse::Expression::Const>();
- add<Parse::Expression::InvalidName>();
- add<Parse::Expression::RequiredArrayMissing>();
- add<Parse::Expression::IncorrectOperandCount>();
- add<Parse::Expression::CorrectOperandCount>();
- add<Parse::Expression::ZeroOperands>();
- add<Parse::Expression::OneOperand>();
- add<Parse::Expression::TwoOperands>();
- add<Parse::Expression::SingletonOperandVariable>();
- add<Parse::Expression::SingletonOperandFixed>();
- add<Parse::Expression::ObjectSingleton>();
- add<Parse::Expression::ObjectOperand>();
- add<Parse::Operand::FieldPath>();
- add<Parse::Operand::NonFieldPathString>();
- add<Parse::Operand::Object>();
- add<Parse::Operand::InclusionObject>();
- add<Parse::Operand::Constant>();
-
- add<Strcasecmp::NullBegin>();
- add<Strcasecmp::NullEnd>();
- add<Strcasecmp::NullMiddleLt>();
- add<Strcasecmp::NullMiddleEq>();
- add<Strcasecmp::NullMiddleGt>();
-
- add<Substr::FullNull>();
- add<Substr::BeginAtNull>();
- add<Substr::EndAtNull>();
- add<Substr::DropBeginningNull>();
- add<Substr::DropEndingNull>();
-
- add<ToLower::NullBegin>();
- add<ToLower::NullMiddle>();
- add<ToLower::NullEnd>();
-
- add<ToUpper::NullBegin>();
- add<ToUpper::NullMiddle>();
- add<ToUpper::NullEnd>();
-
- add<Set::Same>();
- add<Set::Redundant>();
- add<Set::DoubleRedundant>();
- add<Set::Sub>();
- add<Set::Super>();
- add<Set::SameBackwards>();
- add<Set::NoOverlap>();
- add<Set::Overlap>();
- add<Set::FirstNull>();
- add<Set::LastNull>();
- add<Set::NoArg>();
- add<Set::OneArg>();
- add<Set::EmptyArg>();
- add<Set::LeftArgEmpty>();
- add<Set::RightArgEmpty>();
- add<Set::ManyArgs>();
- add<Set::ManyArgsEqual>();
-
- add<AllAnyElements::JustFalse>();
- add<AllAnyElements::JustTrue>();
- add<AllAnyElements::OneTrueOneFalse>();
- add<AllAnyElements::Empty>();
- add<AllAnyElements::TrueViaInt>();
- add<AllAnyElements::FalseViaInt>();
- add<AllAnyElements::Null>();
- }
- };
+class Null : public ExpectedResultBase {
+ Document getSpec() {
+ return DOC("input" << DOC_ARRAY(BSONNULL) << "error" << DOC_ARRAY("$allElementsTrue"
+ << "$anyElementTrue"));
+ }
+};
+
+} // namespace AllAnyElements
+
+class All : public Suite {
+public:
+ All() : Suite("expression") {}
+ void setupTests() {
+ add<Add::NullDocument>();
+ add<Add::NoOperands>();
+ add<Add::Date>();
+ add<Add::String>();
+ add<Add::Bool>();
+ add<Add::Int>();
+ add<Add::Long>();
+ add<Add::Double>();
+ add<Add::Null>();
+ add<Add::Undefined>();
+ add<Add::IntInt>();
+ add<Add::IntIntNoOverflow>();
+ add<Add::IntLong>();
+ add<Add::IntLongOverflow>();
+ add<Add::IntDouble>();
+ add<Add::IntDate>();
+ add<Add::LongDouble>();
+ add<Add::LongDoubleNoOverflow>();
+ add<Add::IntNull>();
+ add<Add::LongUndefined>();
+
+ add<And::NoOperands>();
+ add<And::True>();
+ add<And::False>();
+ add<And::TrueTrue>();
+ add<And::TrueFalse>();
+ add<And::FalseTrue>();
+ add<And::FalseFalse>();
+ add<And::TrueTrueTrue>();
+ add<And::TrueTrueFalse>();
+ add<And::ZeroOne>();
+ add<And::OneTwo>();
+ add<And::FieldPath>();
+ add<And::OptimizeConstantExpression>();
+ add<And::NonConstant>();
+ add<And::ConstantNonConstantTrue>();
+ add<And::ConstantNonConstantFalse>();
+ add<And::NonConstantOne>();
+ add<And::NonConstantZero>();
+ add<And::NonConstantNonConstantOne>();
+ add<And::NonConstantNonConstantZero>();
+ add<And::ZeroOneNonConstant>();
+ add<And::OneOneNonConstant>();
+ add<And::Nested>();
+ add<And::NestedZero>();
+
+ add<CoerceToBool::EvaluateTrue>();
+ add<CoerceToBool::EvaluateFalse>();
+ add<CoerceToBool::Dependencies>();
+ add<CoerceToBool::AddToBsonObj>();
+ add<CoerceToBool::AddToBsonArray>();
+
+ add<Compare::EqLt>();
+ add<Compare::EqEq>();
+ add<Compare::EqGt>();
+ add<Compare::NeLt>();
+ add<Compare::NeEq>();
+ add<Compare::NeGt>();
+ add<Compare::GtLt>();
+ add<Compare::GtEq>();
+ add<Compare::GtGt>();
+ add<Compare::GteLt>();
+ add<Compare::GteEq>();
+ add<Compare::GteGt>();
+ add<Compare::LtLt>();
+ add<Compare::LtEq>();
+ add<Compare::LtGt>();
+ add<Compare::LteLt>();
+ add<Compare::LteEq>();
+ add<Compare::LteGt>();
+ add<Compare::CmpLt>();
+ add<Compare::CmpEq>();
+ add<Compare::CmpGt>();
+ add<Compare::CmpBracketed>();
+ add<Compare::ZeroOperands>();
+ add<Compare::OneOperand>();
+ add<Compare::ThreeOperands>();
+ add<Compare::IncompatibleTypes>();
+ add<Compare::OptimizeConstants>();
+ add<Compare::NoOptimizeCmp>();
+ add<Compare::NoOptimizeNe>();
+ add<Compare::NoOptimizeNoConstant>();
+ add<Compare::NoOptimizeWithoutFieldPath>();
+ add<Compare::NoOptimizeWithoutFieldPathReverse>();
+ add<Compare::OptimizeEq>();
+ add<Compare::OptimizeEqReverse>();
+ add<Compare::OptimizeLt>();
+ add<Compare::OptimizeLtReverse>();
+ add<Compare::OptimizeLte>();
+ add<Compare::OptimizeLteReverse>();
+ add<Compare::OptimizeGt>();
+ add<Compare::OptimizeGtReverse>();
+ add<Compare::OptimizeGte>();
+ add<Compare::OptimizeGteReverse>();
+
+ add<Constant::Create>();
+ add<Constant::CreateFromBsonElement>();
+ add<Constant::Optimize>();
+ add<Constant::Dependencies>();
+ add<Constant::AddToBsonObj>();
+ add<Constant::AddToBsonArray>();
+
+ add<FieldPath::Invalid>();
+ add<FieldPath::Optimize>();
+ add<FieldPath::Dependencies>();
+ add<FieldPath::Missing>();
+ add<FieldPath::Present>();
+ add<FieldPath::NestedBelowNull>();
+ add<FieldPath::NestedBelowUndefined>();
+ add<FieldPath::NestedBelowMissing>();
+ add<FieldPath::NestedBelowInt>();
+ add<FieldPath::NestedValue>();
+ add<FieldPath::NestedBelowEmptyObject>();
+ add<FieldPath::NestedBelowEmptyArray>();
+ add<FieldPath::NestedBelowArrayWithNull>();
+ add<FieldPath::NestedBelowArrayWithUndefined>();
+ add<FieldPath::NestedBelowArrayWithInt>();
+ add<FieldPath::NestedWithinArray>();
+ add<FieldPath::MultipleArrayValues>();
+ add<FieldPath::ExpandNestedArrays>();
+ add<FieldPath::AddToBsonObj>();
+ add<FieldPath::AddToBsonArray>();
+
+ add<Nary::AddOperand>();
+ add<Nary::Dependencies>();
+ add<Nary::AddToBsonObj>();
+ add<Nary::AddToBsonArray>();
+ add<Nary::OptimizeOneOperand>();
+ add<Nary::EvaluateAllConstantOperands>();
+ add<Nary::StringConstant>();
+ add<Nary::SingleConstant>();
+ add<Nary::NoFactory>();
+ add<Nary::FactoryOptimize>();
+ add<Nary::FlattenOptimize>();
+ add<Nary::FlattenThreeLayers>();
+
+ add<Object::Empty>();
+ add<Object::Include>();
+ add<Object::MissingInclude>();
+ add<Object::IncludeId>();
+ add<Object::ExcludeId>();
+ add<Object::SourceOrder>();
+ add<Object::IncludeNested>();
+ add<Object::IncludeTwoNested>();
+ add<Object::IncludeTwoParentNested>();
+ add<Object::IncludeMissingNested>();
+ add<Object::IncludeNestedWithinNonObject>();
+ add<Object::IncludeArrayNested>();
+ add<Object::ExcludeNonRootId>();
+ add<Object::Computed>();
+ add<Object::ComputedReplacement>();
+ add<Object::ComputedUndefined>();
+ add<Object::ComputedUndefinedReplacement>();
+ add<Object::ComputedNull>();
+ add<Object::ComputedNested>();
+ add<Object::ComputedFieldPath>();
+ add<Object::ComputedNestedFieldPath>();
+ add<Object::EmptyNewSubobject>();
+ add<Object::NonEmptyNewSubobject>();
+ add<Object::AdjacentNestedComputedFields>();
+ add<Object::AdjacentDottedAndNestedComputedFields>();
+ add<Object::AdjacentNestedAndDottedComputedFields>();
+ add<Object::AdjacentDottedComputedFields>();
+ add<Object::AdjacentNestedOrdering>();
+ add<Object::MultipleNestedFields>();
+ add<Object::ConflictingExpressionFields>();
+ add<Object::ConflictingInclusionExpressionFields>();
+ add<Object::ConflictingExpressionInclusionFields>();
+ add<Object::ConflictingObjectConstantExpressionFields>();
+ add<Object::ConflictingConstantObjectExpressionFields>();
+ add<Object::ConflictingNestedFields>();
+ add<Object::ConflictingFieldAndSubfield>();
+ add<Object::ConflictingFieldAndNestedField>();
+ add<Object::ConflictingSubfieldAndField>();
+ add<Object::ConflictingNestedFieldAndField>();
+ add<Object::NonInclusionDependencies>();
+ add<Object::InclusionDependencies>();
+ add<Object::Optimize>();
+ add<Object::AddToBsonObj>();
+ add<Object::AddToBsonObjRequireExpression>();
+ add<Object::AddToBsonArray>();
+ add<Object::Evaluate>();
+
+ add<Or::NoOperands>();
+ add<Or::True>();
+ add<Or::False>();
+ add<Or::TrueTrue>();
+ add<Or::TrueFalse>();
+ add<Or::FalseTrue>();
+ add<Or::FalseFalse>();
+ add<Or::FalseFalseFalse>();
+ add<Or::FalseFalseTrue>();
+ add<Or::ZeroOne>();
+ add<Or::ZeroFalse>();
+ add<Or::FieldPath>();
+ add<Or::OptimizeConstantExpression>();
+ add<Or::NonConstant>();
+ add<Or::ConstantNonConstantTrue>();
+ add<Or::ConstantNonConstantFalse>();
+ add<Or::NonConstantOne>();
+ add<Or::NonConstantZero>();
+ add<Or::NonConstantNonConstantOne>();
+ add<Or::NonConstantNonConstantZero>();
+ add<Or::ZeroOneNonConstant>();
+ add<Or::ZeroZeroNonConstant>();
+ add<Or::Nested>();
+ add<Or::NestedOne>();
+
+ add<Parse::Object::NonObject>();
+ add<Parse::Object::Empty>();
+ add<Parse::Object::Operator>();
+ add<Parse::Object::InvalidOperator>();
+ add<Parse::Object::TwoOperators>();
+ add<Parse::Object::OperatorLaterField>();
+ add<Parse::Object::OperatorAndOtherField>();
+ add<Parse::Object::OperatorTopLevel>();
+ add<Parse::Object::Dotted>();
+ add<Parse::Object::DottedTopLevel>();
+ add<Parse::Object::Nested>();
+ add<Parse::Object::NestedParseError>();
+ add<Parse::Object::FieldPath>();
+ add<Parse::Object::InvalidFieldPath>();
+ add<Parse::Object::NonFieldPathString>();
+ add<Parse::Object::DisallowedInclusion>();
+ add<Parse::Object::InclusionBool>();
+ add<Parse::Object::InclusionDouble>();
+ add<Parse::Object::InclusionInt>();
+ add<Parse::Object::InclusionLong>();
+ add<Parse::Object::NestedInclusion>();
+ add<Parse::Object::ExcludeId>();
+ add<Parse::Object::ExcludeNonId>();
+ add<Parse::Object::ExcludeIdNotTopLevel>();
+ add<Parse::Object::InvalidType>();
+ add<Parse::Expression::Const>();
+ add<Parse::Expression::InvalidName>();
+ add<Parse::Expression::RequiredArrayMissing>();
+ add<Parse::Expression::IncorrectOperandCount>();
+ add<Parse::Expression::CorrectOperandCount>();
+ add<Parse::Expression::ZeroOperands>();
+ add<Parse::Expression::OneOperand>();
+ add<Parse::Expression::TwoOperands>();
+ add<Parse::Expression::SingletonOperandVariable>();
+ add<Parse::Expression::SingletonOperandFixed>();
+ add<Parse::Expression::ObjectSingleton>();
+ add<Parse::Expression::ObjectOperand>();
+ add<Parse::Operand::FieldPath>();
+ add<Parse::Operand::NonFieldPathString>();
+ add<Parse::Operand::Object>();
+ add<Parse::Operand::InclusionObject>();
+ add<Parse::Operand::Constant>();
+
+ add<Strcasecmp::NullBegin>();
+ add<Strcasecmp::NullEnd>();
+ add<Strcasecmp::NullMiddleLt>();
+ add<Strcasecmp::NullMiddleEq>();
+ add<Strcasecmp::NullMiddleGt>();
+
+ add<Substr::FullNull>();
+ add<Substr::BeginAtNull>();
+ add<Substr::EndAtNull>();
+ add<Substr::DropBeginningNull>();
+ add<Substr::DropEndingNull>();
+
+ add<ToLower::NullBegin>();
+ add<ToLower::NullMiddle>();
+ add<ToLower::NullEnd>();
+
+ add<ToUpper::NullBegin>();
+ add<ToUpper::NullMiddle>();
+ add<ToUpper::NullEnd>();
+
+ add<Set::Same>();
+ add<Set::Redundant>();
+ add<Set::DoubleRedundant>();
+ add<Set::Sub>();
+ add<Set::Super>();
+ add<Set::SameBackwards>();
+ add<Set::NoOverlap>();
+ add<Set::Overlap>();
+ add<Set::FirstNull>();
+ add<Set::LastNull>();
+ add<Set::NoArg>();
+ add<Set::OneArg>();
+ add<Set::EmptyArg>();
+ add<Set::LeftArgEmpty>();
+ add<Set::RightArgEmpty>();
+ add<Set::ManyArgs>();
+ add<Set::ManyArgsEqual>();
+
+ add<AllAnyElements::JustFalse>();
+ add<AllAnyElements::JustTrue>();
+ add<AllAnyElements::OneTrueOneFalse>();
+ add<AllAnyElements::Empty>();
+ add<AllAnyElements::TrueViaInt>();
+ add<AllAnyElements::FalseViaInt>();
+ add<AllAnyElements::Null>();
+ }
+};
- SuiteInstance<All> myall;
+SuiteInstance<All> myall;
-} // namespace ExpressionTests
+} // namespace ExpressionTests
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index 29cfa2dd608..eef0dfe9f45 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -58,94 +58,96 @@ namespace moe = mongo::optionenvironment;
namespace mongo {
- using std::endl;
- using std::string;
-
- namespace dbtests {
-
- mutex globalCurrentTestNameMutex("globalCurrentTestNameMutex");
- std::string globalCurrentTestName;
-
- class TestWatchDog : public BackgroundJob {
- public:
- virtual string name() const { return "TestWatchDog"; }
- virtual void run(){
-
- int minutesRunning = 0;
- std::string lastRunningTestName, currentTestName;
-
- {
- scoped_lock lk( globalCurrentTestNameMutex );
- lastRunningTestName = globalCurrentTestName;
- }
-
- while (true) {
- sleepsecs(60);
- minutesRunning++;
-
- {
- scoped_lock lk( globalCurrentTestNameMutex );
- currentTestName = globalCurrentTestName;
- }
-
- if (currentTestName != lastRunningTestName) {
- minutesRunning = 0;
- lastRunningTestName = currentTestName;
- }
-
- if (minutesRunning > 30){
- log() << currentTestName << " has been running for more than 30 minutes. aborting." << endl;
- ::abort();
- }
- else if (minutesRunning > 1){
- warning() << currentTestName << " has been running for more than " << minutesRunning-1 << " minutes." << endl;
-
- // See what is stuck
- getGlobalLockManager()->dump();
- }
- }
- }
- };
+using std::endl;
+using std::string;
- int runDbTests(int argc, char** argv) {
- frameworkGlobalParams.perfHist = 1;
- frameworkGlobalParams.seed = time( 0 );
- frameworkGlobalParams.runsPerTest = 1;
+namespace dbtests {
- Client::initThread("testsuite");
+mutex globalCurrentTestNameMutex("globalCurrentTestNameMutex");
+std::string globalCurrentTestName;
- srand( (unsigned) frameworkGlobalParams.seed );
- printGitVersion();
- printOpenSSLVersion();
- printSysInfo();
+class TestWatchDog : public BackgroundJob {
+public:
+ virtual string name() const {
+ return "TestWatchDog";
+ }
+ virtual void run() {
+ int minutesRunning = 0;
+ std::string lastRunningTestName, currentTestName;
- getGlobalEnvironment()->setGlobalStorageEngine(storageGlobalParams.engine);
+ {
+ scoped_lock lk(globalCurrentTestNameMutex);
+ lastRunningTestName = globalCurrentTestName;
+ }
- TestWatchDog twd;
- twd.go();
+ while (true) {
+ sleepsecs(60);
+ minutesRunning++;
+
+ {
+ scoped_lock lk(globalCurrentTestNameMutex);
+ currentTestName = globalCurrentTestName;
+ }
- int ret = ::mongo::unittest::Suite::run(frameworkGlobalParams.suites,
- frameworkGlobalParams.filter,
- frameworkGlobalParams.runsPerTest);
+ if (currentTestName != lastRunningTestName) {
+ minutesRunning = 0;
+ lastRunningTestName = currentTestName;
+ }
+ if (minutesRunning > 30) {
+ log() << currentTestName << " has been running for more than 30 minutes. aborting."
+ << endl;
+ ::abort();
+ } else if (minutesRunning > 1) {
+ warning() << currentTestName << " has been running for more than "
+ << minutesRunning - 1 << " minutes." << endl;
- cc().shutdown();
- exitCleanly( (ExitCode)ret ); // so everything shuts down cleanly
- return ret;
+            // Dump the lock manager state to see what is stuck.
+ getGlobalLockManager()->dump();
+ }
}
- } // namespace dbtests
+ }
+};
+
+int runDbTests(int argc, char** argv) {
+ frameworkGlobalParams.perfHist = 1;
+ frameworkGlobalParams.seed = time(0);
+ frameworkGlobalParams.runsPerTest = 1;
+
+ Client::initThread("testsuite");
+
+ srand((unsigned)frameworkGlobalParams.seed);
+ printGitVersion();
+ printOpenSSLVersion();
+ printSysInfo();
+
+ getGlobalEnvironment()->setGlobalStorageEngine(storageGlobalParams.engine);
+
+ TestWatchDog twd;
+ twd.go();
+
+ int ret = ::mongo::unittest::Suite::run(frameworkGlobalParams.suites,
+ frameworkGlobalParams.filter,
+ frameworkGlobalParams.runsPerTest);
+
+ cc().shutdown();
+ exitCleanly((ExitCode)ret); // so everything shuts down cleanly
+ return ret;
+}
+} // namespace dbtests
#ifdef _WIN32
namespace ntservice {
- bool shouldStartService() {
- return false;
- }
+bool shouldStartService() {
+ return false;
+}
}
#endif
} // namespace mongo
-void mongo::unittest::onCurrentTestNameChange( const std::string &testName ) {
- scoped_lock lk( mongo::dbtests::globalCurrentTestNameMutex );
+void mongo::unittest::onCurrentTestNameChange(const std::string& testName) {
+ scoped_lock lk(mongo::dbtests::globalCurrentTestNameMutex);
mongo::dbtests::globalCurrentTestName = testName;
}
diff --git a/src/mongo/dbtests/framework.h b/src/mongo/dbtests/framework.h
index c73a27c48f0..9f9edeb0831 100644
--- a/src/mongo/dbtests/framework.h
+++ b/src/mongo/dbtests/framework.h
@@ -36,7 +36,7 @@
#include <string>
namespace mongo {
- namespace dbtests {
- int runDbTests(int argc, char ** argv);
- } // dbtests
+namespace dbtests {
+int runDbTests(int argc, char** argv);
+} // dbtests
} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options.cpp b/src/mongo/dbtests/framework_options.cpp
index 26b795e0893..68c0ff15351 100644
--- a/src/mongo/dbtests/framework_options.cpp
+++ b/src/mongo/dbtests/framework_options.cpp
@@ -46,197 +46,195 @@
namespace mongo {
- using std::cout;
- using std::endl;
- using std::string;
- using std::vector;
+using std::cout;
+using std::endl;
+using std::string;
+using std::vector;
- FrameworkGlobalParams frameworkGlobalParams;
+FrameworkGlobalParams frameworkGlobalParams;
- Status addTestFrameworkOptions(moe::OptionSection* options) {
+Status addTestFrameworkOptions(moe::OptionSection* options) {
+ options->addOptionChaining("help", "help,h", moe::Switch, "show this usage information");
- options->addOptionChaining("help", "help,h", moe::Switch, "show this usage information");
+ options->addOptionChaining(
+ "dbpath",
+ "dbpath",
+ moe::String,
+ "db data path for this test run. NOTE: the contents of this directory will "
+ "be overwritten if it already exists")
+ .setDefault(moe::Value(dbtests::default_test_dbpath));
- options->addOptionChaining("dbpath", "dbpath", moe::String,
- "db data path for this test run. NOTE: the contents of this directory will "
- "be overwritten if it already exists")
- .setDefault(moe::Value(dbtests::default_test_dbpath));
+ options->addOptionChaining("debug", "debug", moe::Switch, "run tests with verbose output");
- options->addOptionChaining("debug", "debug", moe::Switch, "run tests with verbose output");
+ options->addOptionChaining("list", "list,l", moe::Switch, "list available test suites");
- options->addOptionChaining("list", "list,l", moe::Switch, "list available test suites");
+ options->addOptionChaining("bigfiles",
+ "bigfiles",
+ moe::Switch,
+ "use big datafiles instead of smallfiles which is the default");
- options->addOptionChaining("bigfiles", "bigfiles", moe::Switch,
- "use big datafiles instead of smallfiles which is the default");
+ options->addOptionChaining(
+ "filter", "filter,f", moe::String, "string substring filter on test name");
- options->addOptionChaining("filter", "filter,f", moe::String,
- "string substring filter on test name");
+ options->addOptionChaining("verbose", "verbose,v", moe::Switch, "verbose");
- options->addOptionChaining("verbose", "verbose,v", moe::Switch, "verbose");
+ options->addOptionChaining(
+ "dur", "dur", moe::Switch, "enable journaling (currently the default)");
- options->addOptionChaining("dur", "dur", moe::Switch,
- "enable journaling (currently the default)");
+ options->addOptionChaining("nodur", "nodur", moe::Switch, "disable journaling");
- options->addOptionChaining("nodur", "nodur", moe::Switch, "disable journaling");
+ options->addOptionChaining("seed", "seed", moe::UnsignedLongLong, "random number seed");
- options->addOptionChaining("seed", "seed", moe::UnsignedLongLong, "random number seed");
+ options->addOptionChaining("runs", "runs", moe::Int, "number of times to run each test");
- options->addOptionChaining("runs", "runs", moe::Int, "number of times to run each test");
+ options->addOptionChaining(
+ "perfHist", "perfHist", moe::Unsigned, "number of back runs of perf stats to display");
- options->addOptionChaining("perfHist", "perfHist", moe::Unsigned,
- "number of back runs of perf stats to display");
+ options->addOptionChaining(
+ "storage.engine", "storageEngine", moe::String, "what storage engine to use")
+ .setDefault(moe::Value(std::string("mmapv1")));
- options->addOptionChaining("storage.engine", "storageEngine", moe::String,
- "what storage engine to use")
- .setDefault(moe::Value(std::string("mmapv1")));
+ options->addOptionChaining("suites", "suites", moe::StringVector, "test suites to run")
+ .hidden()
+ .positional(1, -1);
- options->addOptionChaining("suites", "suites", moe::StringVector, "test suites to run")
- .hidden()
- .positional(1, -1);
+ options->addOptionChaining(
+ "nopreallocj", "nopreallocj", moe::Switch, "disable journal prealloc").hidden();
- options->addOptionChaining("nopreallocj", "nopreallocj", moe::Switch,
- "disable journal prealloc")
- .hidden();
+ return Status::OK();
+}
- return Status::OK();
- }
+std::string getTestFrameworkHelp(const StringData& name, const moe::OptionSection& options) {
+ StringBuilder sb;
+ sb << "usage: " << name << " [options] [suite]...\n" << options.helpString()
+ << "suite: run the specified test suite(s) only\n";
+ return sb.str();
+}
- std::string getTestFrameworkHelp(const StringData& name, const moe::OptionSection& options) {
- StringBuilder sb;
- sb << "usage: " << name << " [options] [suite]...\n"
- << options.helpString() << "suite: run the specified test suite(s) only\n";
- return sb.str();
+bool handlePreValidationTestFrameworkOptions(const moe::Environment& params,
+ const std::vector<std::string>& args) {
+ if (params.count("help")) {
+ std::cout << getTestFrameworkHelp(args[0], moe::startupOptions) << std::endl;
+ return false;
}
- bool handlePreValidationTestFrameworkOptions(const moe::Environment& params,
- const std::vector<std::string>& args) {
- if (params.count("help")) {
- std::cout << getTestFrameworkHelp(args[0], moe::startupOptions) << std::endl;
- return false;
+ if (params.count("list")) {
+ std::vector<std::string> suiteNames = mongo::unittest::getAllSuiteNames();
+ for (std::vector<std::string>::const_iterator i = suiteNames.begin(); i != suiteNames.end();
+ ++i) {
+ std::cout << *i << std::endl;
}
+ return false;
+ }
- if (params.count("list")) {
- std::vector<std::string> suiteNames = mongo::unittest::getAllSuiteNames();
- for ( std::vector<std::string>::const_iterator i = suiteNames.begin();
- i != suiteNames.end(); ++i ) {
-
- std::cout << *i << std::endl;
- }
- return false;
- }
+ return true;
+}
- return true;
+Status storeTestFrameworkOptions(const moe::Environment& params,
+ const std::vector<std::string>& args) {
+ if (params.count("dbpath")) {
+ frameworkGlobalParams.dbpathSpec = params["dbpath"].as<string>();
}
- Status storeTestFrameworkOptions(const moe::Environment& params,
- const std::vector<std::string>& args) {
-
- if (params.count("dbpath")) {
- frameworkGlobalParams.dbpathSpec = params["dbpath"].as<string>();
- }
+ if (params.count("seed")) {
+ frameworkGlobalParams.seed = params["seed"].as<unsigned long long>();
+ }
- if (params.count("seed")) {
- frameworkGlobalParams.seed = params["seed"].as<unsigned long long>();
- }
+ if (params.count("runs")) {
+ frameworkGlobalParams.runsPerTest = params["runs"].as<int>();
+ }
- if (params.count("runs")) {
- frameworkGlobalParams.runsPerTest = params["runs"].as<int>();
- }
+ if (params.count("perfHist")) {
+ frameworkGlobalParams.perfHist = params["perfHist"].as<unsigned>();
+ }
- if (params.count("perfHist")) {
- frameworkGlobalParams.perfHist = params["perfHist"].as<unsigned>();
- }
+ bool nodur = false;
+ if (params.count("nodur")) {
+ nodur = true;
+ storageGlobalParams.dur = false;
+ }
+ if (params.count("dur") || storageGlobalParams.dur) {
+ storageGlobalParams.dur = true;
+ }
- bool nodur = false;
- if( params.count("nodur") ) {
- nodur = true;
- storageGlobalParams.dur = false;
- }
- if( params.count("dur") || storageGlobalParams.dur ) {
- storageGlobalParams.dur = true;
- }
+ if (params.count("nopreallocj")) {
+ mmapv1GlobalOptions.preallocj = false;
+ }
- if( params.count("nopreallocj") ) {
- mmapv1GlobalOptions.preallocj = false;
- }
+ if (params.count("debug") || params.count("verbose")) {
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(1));
+ }
- if (params.count("debug") || params.count("verbose") ) {
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(1));
- }
+ boost::filesystem::path p(frameworkGlobalParams.dbpathSpec);
- boost::filesystem::path p(frameworkGlobalParams.dbpathSpec);
-
- /* remove the contents of the test directory if it exists. */
- try {
- if (boost::filesystem::exists(p)) {
- if (!boost::filesystem::is_directory(p)) {
- StringBuilder sb;
- sb << "ERROR: path \"" << p.string() << "\" is not a directory";
- sb << getTestFrameworkHelp(args[0], moe::startupOptions);
- return Status(ErrorCodes::BadValue, sb.str());
- }
- boost::filesystem::directory_iterator end_iter;
- for (boost::filesystem::directory_iterator dir_iter(p);
- dir_iter != end_iter; ++dir_iter) {
- boost::filesystem::remove_all(*dir_iter);
- }
+ /* remove the contents of the test directory if it exists. */
+ try {
+ if (boost::filesystem::exists(p)) {
+ if (!boost::filesystem::is_directory(p)) {
+ StringBuilder sb;
+ sb << "ERROR: path \"" << p.string() << "\" is not a directory";
+ sb << getTestFrameworkHelp(args[0], moe::startupOptions);
+ return Status(ErrorCodes::BadValue, sb.str());
}
- else {
- boost::filesystem::create_directory(p);
+ boost::filesystem::directory_iterator end_iter;
+ for (boost::filesystem::directory_iterator dir_iter(p); dir_iter != end_iter;
+ ++dir_iter) {
+ boost::filesystem::remove_all(*dir_iter);
}
+ } else {
+ boost::filesystem::create_directory(p);
}
- catch (const boost::filesystem::filesystem_error& e) {
- StringBuilder sb;
- sb << "boost::filesystem threw exception: " << e.what();
- return Status(ErrorCodes::BadValue, sb.str());
- }
-
- string dbpathString = p.string();
- storageGlobalParams.dbpath = dbpathString.c_str();
+ } catch (const boost::filesystem::filesystem_error& e) {
+ StringBuilder sb;
+ sb << "boost::filesystem threw exception: " << e.what();
+ return Status(ErrorCodes::BadValue, sb.str());
+ }
- mmapv1GlobalOptions.prealloc = false;
+ string dbpathString = p.string();
+ storageGlobalParams.dbpath = dbpathString.c_str();
- // dbtest defaults to smallfiles
- mmapv1GlobalOptions.smallfiles = true;
- if( params.count("bigfiles") ) {
- storageGlobalParams.dur = true;
- }
+ mmapv1GlobalOptions.prealloc = false;
- DEV log() << "_DEBUG build" << endl;
- if( sizeof(void*)==4 )
- log() << "32bit" << endl;
- log() << "random seed: " << frameworkGlobalParams.seed << endl;
-
- if( time(0) % 3 == 0 && !nodur ) {
- if (!storageGlobalParams.dur) {
- storageGlobalParams.dur = true;
- log() << "****************" << endl;
- log() << "running with journaling enabled to test that. dbtests will do this "
- << "occasionally even if --dur is not specified." << endl;
- log() << "****************" << endl;
- }
- }
+ // dbtest defaults to smallfiles
+ mmapv1GlobalOptions.smallfiles = true;
+ if (params.count("bigfiles")) {
+ storageGlobalParams.dur = true;
+ }
- storageGlobalParams.engine = params["storage.engine"].as<string>();
+ DEV log() << "_DEBUG build" << endl;
+ if (sizeof(void*) == 4)
+ log() << "32bit" << endl;
+ log() << "random seed: " << frameworkGlobalParams.seed << endl;
- if (params.count("suites")) {
- frameworkGlobalParams.suites = params["suites"].as< vector<string> >();
+ if (time(0) % 3 == 0 && !nodur) {
+ if (!storageGlobalParams.dur) {
+ storageGlobalParams.dur = true;
+ log() << "****************" << endl;
+ log() << "running with journaling enabled to test that. dbtests will do this "
+ << "occasionally even if --dur is not specified." << endl;
+ log() << "****************" << endl;
}
+ }
- frameworkGlobalParams.filter = "";
- if ( params.count( "filter" ) ) {
- frameworkGlobalParams.filter = params["filter"].as<string>();
- }
+ storageGlobalParams.engine = params["storage.engine"].as<string>();
- if (debug && storageGlobalParams.dur) {
- log() << "_DEBUG: automatically enabling mmapv1GlobalOptions.journalOptions=8 "
- << "(JournalParanoid)" << endl;
- // this was commented out. why too slow or something?
- mmapv1GlobalOptions.journalOptions |= MMAPV1Options::JournalParanoid;
- }
+ if (params.count("suites")) {
+ frameworkGlobalParams.suites = params["suites"].as<vector<string>>();
+ }
- return Status::OK();
+ frameworkGlobalParams.filter = "";
+ if (params.count("filter")) {
+ frameworkGlobalParams.filter = params["filter"].as<string>();
}
+ if (debug && storageGlobalParams.dur) {
+ log() << "_DEBUG: automatically enabling mmapv1GlobalOptions.journalOptions=8 "
+ << "(JournalParanoid)" << endl;
+        // This was previously commented out, possibly because it was too slow.
+ mmapv1GlobalOptions.journalOptions |= MMAPV1Options::JournalParanoid;
+ }
+
+ return Status::OK();
+}
}
diff --git a/src/mongo/dbtests/framework_options.h b/src/mongo/dbtests/framework_options.h
index 4107b204174..f85c04117d3 100644
--- a/src/mongo/dbtests/framework_options.h
+++ b/src/mongo/dbtests/framework_options.h
@@ -35,36 +35,36 @@
namespace mongo {
- namespace optionenvironment {
- class OptionSection;
- class Environment;
- } // namespace optionenvironment
+namespace optionenvironment {
+class OptionSection;
+class Environment;
+} // namespace optionenvironment
- namespace moe = mongo::optionenvironment;
+namespace moe = mongo::optionenvironment;
- struct FrameworkGlobalParams {
- unsigned perfHist;
- unsigned long long seed;
- int runsPerTest;
- std::string dbpathSpec;
- std::vector<std::string> suites;
- std::string filter;
- };
+struct FrameworkGlobalParams {
+ unsigned perfHist;
+ unsigned long long seed;
+ int runsPerTest;
+ std::string dbpathSpec;
+ std::vector<std::string> suites;
+ std::string filter;
+};
- extern FrameworkGlobalParams frameworkGlobalParams;
+extern FrameworkGlobalParams frameworkGlobalParams;
- Status addTestFrameworkOptions(moe::OptionSection* options);
+Status addTestFrameworkOptions(moe::OptionSection* options);
- std::string getTestFrameworkHelp(const StringData& name, const moe::OptionSection& options);
+std::string getTestFrameworkHelp(const StringData& name, const moe::OptionSection& options);
- /**
- * Handle options that should come before validation, such as "help".
- *
- * Returns false if an option was found that implies we should prematurely exit with success.
- */
- bool handlePreValidationTestFrameworkOptions(const moe::Environment& params,
+/**
+ * Handle options that should come before validation, such as "help".
+ *
+ * Returns false if an option was found that implies we should prematurely exit with success.
+ */
+bool handlePreValidationTestFrameworkOptions(const moe::Environment& params,
const std::vector<std::string>& args);
- Status storeTestFrameworkOptions(const moe::Environment& params,
- const std::vector<std::string>& args);
+Status storeTestFrameworkOptions(const moe::Environment& params,
+ const std::vector<std::string>& args);
}
diff --git a/src/mongo/dbtests/framework_options_init.cpp b/src/mongo/dbtests/framework_options_init.cpp
index d8d20e63445..d2764e6b46a 100644
--- a/src/mongo/dbtests/framework_options_init.cpp
+++ b/src/mongo/dbtests/framework_options_init.cpp
@@ -35,29 +35,28 @@
#include "mongo/util/quick_exit.h"
namespace mongo {
- MONGO_GENERAL_STARTUP_OPTIONS_REGISTER(FrameworkOptions)(InitializerContext* context) {
- return addTestFrameworkOptions(&moe::startupOptions);
- }
+MONGO_GENERAL_STARTUP_OPTIONS_REGISTER(FrameworkOptions)(InitializerContext* context) {
+ return addTestFrameworkOptions(&moe::startupOptions);
+}
- MONGO_STARTUP_OPTIONS_VALIDATE(FrameworkOptions)(InitializerContext* context) {
- if (!handlePreValidationTestFrameworkOptions(moe::startupOptionsParsed, context->args())) {
- quickExit(EXIT_SUCCESS);
- }
- Status ret = moe::startupOptionsParsed.validate();
- if (!ret.isOK()) {
- return ret;
- }
- return Status::OK();
+MONGO_STARTUP_OPTIONS_VALIDATE(FrameworkOptions)(InitializerContext* context) {
+ if (!handlePreValidationTestFrameworkOptions(moe::startupOptionsParsed, context->args())) {
+ quickExit(EXIT_SUCCESS);
+ }
+ Status ret = moe::startupOptionsParsed.validate();
+ if (!ret.isOK()) {
+ return ret;
}
+ return Status::OK();
+}
- MONGO_STARTUP_OPTIONS_STORE(FrameworkOptions)(InitializerContext* context) {
- Status ret = storeTestFrameworkOptions(moe::startupOptionsParsed, context->args());
- if (!ret.isOK()) {
- std::cerr << ret.toString() << std::endl;
- std::cerr << "try '" << context->args()[0] << " --help' for more information"
- << std::endl;
- quickExit(EXIT_BADOPTIONS);
- }
- return Status::OK();
+MONGO_STARTUP_OPTIONS_STORE(FrameworkOptions)(InitializerContext* context) {
+ Status ret = storeTestFrameworkOptions(moe::startupOptionsParsed, context->args());
+ if (!ret.isOK()) {
+ std::cerr << ret.toString() << std::endl;
+ std::cerr << "try '" << context->args()[0] << " --help' for more information" << std::endl;
+ quickExit(EXIT_BADOPTIONS);
}
+ return Status::OK();
+}
}
diff --git a/src/mongo/dbtests/gle_test.cpp b/src/mongo/dbtests/gle_test.cpp
index 4514971578b..4e01ed15c18 100644
--- a/src/mongo/dbtests/gle_test.cpp
+++ b/src/mongo/dbtests/gle_test.cpp
@@ -38,78 +38,81 @@ using mongo::MsgAssertionException;
*/
namespace {
- using std::string;
-
- static const char* const _ns = "unittests.gle";
-
- /**
- * Verify that when the command fails we get back an error message.
- */
- class GetLastErrorCommandFailure {
- public:
- void run() {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- client.insert(_ns, BSON( "test" << "test"));
-
- // Cannot mix fsync + j, will make command fail
- string gleString = client.getLastError(true, true, 10, 10);
- ASSERT_NOT_EQUALS(gleString, "");
- }
- };
-
- /**
- * Verify that the write succeeds
- */
- class GetLastErrorClean {
- public:
- void run() {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- client.insert(_ns, BSON( "test" << "test"));
-
- // Make sure there was no error
- string gleString = client.getLastError();
- ASSERT_EQUALS(gleString, "");
- }
- };
-
- /**
- * Verify that the write succeed first, then error on dup
- */
- class GetLastErrorFromDup {
- public:
- void run() {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- client.insert(_ns, BSON( "_id" << 1));
-
- // Make sure there was no error
- string gleString = client.getLastError();
- ASSERT_EQUALS(gleString, "");
-
- //insert dup
- client.insert(_ns, BSON( "_id" << 1));
- // Make sure there was an error
- gleString = client.getLastError();
- ASSERT_NOT_EQUALS(gleString, "");
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "gle" ) {
- }
-
- void setupTests() {
- add< GetLastErrorClean >();
- add< GetLastErrorCommandFailure >();
- add< GetLastErrorFromDup >();
- }
- };
-
- SuiteInstance<All> myall;
+using std::string;
+
+static const char* const _ns = "unittests.gle";
+
+/**
+ * Verify that when the command fails we get back an error message.
+ */
+class GetLastErrorCommandFailure {
+public:
+ void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.insert(_ns,
+ BSON("test"
+ << "test"));
+
+ // Cannot mix fsync + j, will make command fail
+ string gleString = client.getLastError(true, true, 10, 10);
+ ASSERT_NOT_EQUALS(gleString, "");
+ }
+};
+
+/**
+ * Verify that the write succeeds
+ */
+class GetLastErrorClean {
+public:
+ void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.insert(_ns,
+ BSON("test"
+ << "test"));
+
+ // Make sure there was no error
+ string gleString = client.getLastError();
+ ASSERT_EQUALS(gleString, "");
+ }
+};
+
+/**
+ * Verify that the write succeeds first, then errors on a duplicate key
+ */
+class GetLastErrorFromDup {
+public:
+ void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.insert(_ns, BSON("_id" << 1));
+
+ // Make sure there was no error
+ string gleString = client.getLastError();
+ ASSERT_EQUALS(gleString, "");
+
+ // insert dup
+ client.insert(_ns, BSON("_id" << 1));
+ // Make sure there was an error
+ gleString = client.getLastError();
+ ASSERT_NOT_EQUALS(gleString, "");
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("gle") {}
+
+ void setupTests() {
+ add<GetLastErrorClean>();
+ add<GetLastErrorCommandFailure>();
+ add<GetLastErrorFromDup>();
+ }
+};
+
+SuiteInstance<All> myall;
}
diff --git a/src/mongo/dbtests/gridfstest.cpp b/src/mongo/dbtests/gridfstest.cpp
index ec8fde0c59b..4edff7cc184 100644
--- a/src/mongo/dbtests/gridfstest.cpp
+++ b/src/mongo/dbtests/gridfstest.cpp
@@ -39,34 +39,32 @@ using mongo::GridFS;
using mongo::MsgAssertionException;
namespace {
-
- class SetChunkSizeTest {
- public:
- virtual void run() {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
- GridFS grid(client, "gridtest");
- grid.setChunkSize( 5 );
+class SetChunkSizeTest {
+public:
+ virtual void run() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
- ASSERT_EQUALS( 5U, grid.getChunkSize() );
- ASSERT_THROWS( grid.setChunkSize( 0 ), MsgAssertionException );
- ASSERT_EQUALS( 5U, grid.getChunkSize() );
- }
+ GridFS grid(client, "gridtest");
+ grid.setChunkSize(5);
- virtual ~SetChunkSizeTest() {}
- };
+ ASSERT_EQUALS(5U, grid.getChunkSize());
+ ASSERT_THROWS(grid.setChunkSize(0), MsgAssertionException);
+ ASSERT_EQUALS(5U, grid.getChunkSize());
+ }
- class All : public Suite {
- public:
- All() : Suite( "gridfs" ) {
- }
+ virtual ~SetChunkSizeTest() {}
+};
- void setupTests() {
- add< SetChunkSizeTest >();
- }
- };
+class All : public Suite {
+public:
+ All() : Suite("gridfs") {}
- SuiteInstance<All> myall;
-}
+ void setupTests() {
+ add<SetChunkSizeTest>();
+ }
+};
+SuiteInstance<All> myall;
+}
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 346ba85a799..bfa1b2821ba 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -28,146 +28,145 @@
namespace IndexCatalogTests {
- static const char* const _ns = "unittests.indexcatalog";
-
- class IndexIteratorTests {
- public:
- IndexIteratorTests() {
- OperationContextImpl txn;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
- Client::Context ctx(&txn, _ns);
- WriteUnitOfWork wuow(&txn);
-
- _db = ctx.db();
- _coll = _db->createCollection(&txn, _ns);
- _catalog = _coll->getIndexCatalog();
- wuow.commit();
- }
-
- ~IndexIteratorTests() {
- OperationContextImpl txn;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
- Client::Context ctx(&txn, _ns);
- WriteUnitOfWork wuow(&txn);
-
- _db->dropCollection(&txn, _ns);
- wuow.commit();
- }
-
- void run() {
- OperationContextImpl txn;
- Client::WriteContext ctx(&txn, _ns);
-
- int numFinishedIndexesStart = _catalog->numIndexesReady(&txn);
-
- dbtests::createIndex(&txn, _ns, BSON("x" << 1));
- dbtests::createIndex(&txn, _ns, BSON("y" << 1));
-
- ASSERT_TRUE(_catalog->numIndexesReady(&txn) == numFinishedIndexesStart+2);
-
- IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(&txn,false);
- int indexesIterated = 0;
- bool foundIndex = false;
- while (ii.more()) {
- IndexDescriptor* indexDesc = ii.next();
- indexesIterated++;
- BSONObjIterator boit(indexDesc->infoObj());
- while (boit.more() && !foundIndex) {
- BSONElement e = boit.next();
- if (str::equals(e.fieldName(), "name") &&
- str::equals(e.valuestrsafe(), "y_1")) {
- foundIndex = true;
- break;
- }
+static const char* const _ns = "unittests.indexcatalog";
+
+class IndexIteratorTests {
+public:
+ IndexIteratorTests() {
+ OperationContextImpl txn;
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ Client::Context ctx(&txn, _ns);
+ WriteUnitOfWork wuow(&txn);
+
+ _db = ctx.db();
+ _coll = _db->createCollection(&txn, _ns);
+ _catalog = _coll->getIndexCatalog();
+ wuow.commit();
+ }
+
+ ~IndexIteratorTests() {
+ OperationContextImpl txn;
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ Client::Context ctx(&txn, _ns);
+ WriteUnitOfWork wuow(&txn);
+
+ _db->dropCollection(&txn, _ns);
+ wuow.commit();
+ }
+
+ void run() {
+ OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, _ns);
+
+ int numFinishedIndexesStart = _catalog->numIndexesReady(&txn);
+
+ dbtests::createIndex(&txn, _ns, BSON("x" << 1));
+ dbtests::createIndex(&txn, _ns, BSON("y" << 1));
+
+ ASSERT_TRUE(_catalog->numIndexesReady(&txn) == numFinishedIndexesStart + 2);
+
+ IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(&txn, false);
+ int indexesIterated = 0;
+ bool foundIndex = false;
+ while (ii.more()) {
+ IndexDescriptor* indexDesc = ii.next();
+ indexesIterated++;
+ BSONObjIterator boit(indexDesc->infoObj());
+ while (boit.more() && !foundIndex) {
+ BSONElement e = boit.next();
+ if (str::equals(e.fieldName(), "name") && str::equals(e.valuestrsafe(), "y_1")) {
+ foundIndex = true;
+ break;
}
}
-
- ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady(&txn));
- ASSERT_TRUE(foundIndex);
}
- private:
- IndexCatalog* _catalog;
- Collection* _coll;
- Database* _db;
- };
-
- /**
- * Test for IndexCatalog::refreshEntry().
- */
- class RefreshEntry {
- public:
- RefreshEntry() {
- OperationContextImpl txn;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
- Client::Context ctx(&txn, _ns);
- WriteUnitOfWork wuow(&txn);
-
- _db = ctx.db();
- _coll = _db->createCollection(&txn, _ns);
- _catalog = _coll->getIndexCatalog();
- wuow.commit();
- }
-
- ~RefreshEntry () {
- OperationContextImpl txn;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
- Client::Context ctx(&txn, _ns);
- WriteUnitOfWork wuow(&txn);
-
- _db->dropCollection(&txn, _ns);
- wuow.commit();
- }
-
- void run() {
- OperationContextImpl txn;
- Client::WriteContext ctx(&txn, _ns);
- const std::string indexName = "x_1";
-
- ASSERT_OK(dbtests::createIndexFromSpec(&txn, _ns, BSON("name" << indexName <<
- "ns" << _ns <<
- "key" << BSON("x" << 1) <<
- "expireAfterSeconds" << 5)));
+ ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady(&txn));
+ ASSERT_TRUE(foundIndex);
+ }
- const IndexDescriptor* desc = _catalog->findIndexByName(&txn, indexName);
- ASSERT(desc);
- ASSERT_EQUALS(5, desc->infoObj()["expireAfterSeconds"].numberLong());
+private:
+ IndexCatalog* _catalog;
+ Collection* _coll;
+ Database* _db;
+};
- // Change value of "expireAfterSeconds" on disk.
- WriteUnitOfWork wuow(&txn);
- _coll->getCatalogEntry()->updateTTLSetting(&txn, "x_1", 10);
- wuow.commit();
-
- // Verify that the catalog does not yet know of the change.
- desc = _catalog->findIndexByName(&txn, indexName);
- ASSERT_EQUALS(5, desc->infoObj()["expireAfterSeconds"].numberLong());
-
- // Notify the catalog of the change.
- desc = _catalog->refreshEntry(&txn, desc);
-
- // Test that the catalog reflects the change.
- ASSERT_EQUALS(10, desc->infoObj()["expireAfterSeconds"].numberLong());
- }
-
- private:
- IndexCatalog* _catalog;
- Collection* _coll;
- Database* _db;
- };
-
- class IndexCatalogTests : public Suite {
- public:
- IndexCatalogTests() : Suite( "indexcatalogtests" ) {
- }
- void setupTests() {
- add<IndexIteratorTests>();
- add<RefreshEntry>();
- }
- };
-
- SuiteInstance<IndexCatalogTests> indexCatalogTests;
+/**
+ * Test for IndexCatalog::refreshEntry().
+ */
+class RefreshEntry {
+public:
+ RefreshEntry() {
+ OperationContextImpl txn;
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ Client::Context ctx(&txn, _ns);
+ WriteUnitOfWork wuow(&txn);
+
+ _db = ctx.db();
+ _coll = _db->createCollection(&txn, _ns);
+ _catalog = _coll->getIndexCatalog();
+ wuow.commit();
+ }
+
+ ~RefreshEntry() {
+ OperationContextImpl txn;
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ Client::Context ctx(&txn, _ns);
+ WriteUnitOfWork wuow(&txn);
+
+ _db->dropCollection(&txn, _ns);
+ wuow.commit();
+ }
+
+ void run() {
+ OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, _ns);
+ const std::string indexName = "x_1";
+
+ ASSERT_OK(dbtests::createIndexFromSpec(&txn,
+ _ns,
+ BSON("name" << indexName << "ns" << _ns << "key"
+ << BSON("x" << 1) << "expireAfterSeconds"
+ << 5)));
+
+ const IndexDescriptor* desc = _catalog->findIndexByName(&txn, indexName);
+ ASSERT(desc);
+ ASSERT_EQUALS(5, desc->infoObj()["expireAfterSeconds"].numberLong());
+
+ // Change value of "expireAfterSeconds" on disk.
+ WriteUnitOfWork wuow(&txn);
+ _coll->getCatalogEntry()->updateTTLSetting(&txn, "x_1", 10);
+ wuow.commit();
+
+ // Verify that the catalog does not yet know of the change.
+ desc = _catalog->findIndexByName(&txn, indexName);
+ ASSERT_EQUALS(5, desc->infoObj()["expireAfterSeconds"].numberLong());
+
+ // Notify the catalog of the change.
+ desc = _catalog->refreshEntry(&txn, desc);
+
+ // Test that the catalog reflects the change.
+ ASSERT_EQUALS(10, desc->infoObj()["expireAfterSeconds"].numberLong());
+ }
+
+private:
+ IndexCatalog* _catalog;
+ Collection* _coll;
+ Database* _db;
+};
+
+class IndexCatalogTests : public Suite {
+public:
+ IndexCatalogTests() : Suite("indexcatalogtests") {}
+ void setupTests() {
+ add<IndexIteratorTests>();
+ add<RefreshEntry>();
+ }
+};
+
+SuiteInstance<IndexCatalogTests> indexCatalogTests;
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 30ac55d7389..ea21689d255 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -46,32 +46,30 @@
namespace IndexUpdateTests {
- using boost::scoped_ptr;
+using boost::scoped_ptr;
- static const char* const _ns = "unittests.indexupdate";
+static const char* const _ns = "unittests.indexupdate";
- /**
- * Test fixture for a write locked test using collection _ns. Includes functionality to
- * partially construct a new IndexDetails in a manner that supports proper cleanup in
- * dropCollection().
- */
- class IndexBuildBase {
- public:
- IndexBuildBase() :
- _ctx(&_txn, _ns),
- _client(&_txn) {
+/**
+ * Test fixture for a write-locked test using collection _ns. Includes functionality to
+ * partially construct a new IndexDetails in a manner that supports proper cleanup in
+ * dropCollection().
+ */
+class IndexBuildBase {
+public:
+ IndexBuildBase() : _ctx(&_txn, _ns), _client(&_txn) {
+ _client.createCollection(_ns);
+ }
+ ~IndexBuildBase() {
+ _client.dropCollection(_ns);
+ getGlobalEnvironment()->unsetKillAllOperations();
+ }
+ Collection* collection() {
+ return _ctx.getCollection();
+ }
- _client.createCollection( _ns );
- }
- ~IndexBuildBase() {
- _client.dropCollection( _ns );
- getGlobalEnvironment()->unsetKillAllOperations();
- }
- Collection* collection() {
- return _ctx.getCollection();
- }
- protected:
- // QUERY_MIGRATION
+protected:
+// QUERY_MIGRATION
#if 0
/** @return IndexDetails for a new index on a:1, with the info field populated. */
IndexDescriptor* addIndexWithInfo() {
@@ -97,36 +95,35 @@ namespace IndexUpdateTests {
}
#endif
- Status createIndex(const std::string& dbname, const BSONObj& indexSpec);
+ Status createIndex(const std::string& dbname, const BSONObj& indexSpec);
- bool buildIndexInterrupted(const BSONObj& key, bool allowInterruption) {
- try {
- MultiIndexBlock indexer(&_txn, collection());
- if (allowInterruption)
- indexer.allowInterruption();
+ bool buildIndexInterrupted(const BSONObj& key, bool allowInterruption) {
+ try {
+ MultiIndexBlock indexer(&_txn, collection());
+ if (allowInterruption)
+ indexer.allowInterruption();
- uassertStatusOK(indexer.init(key));
- uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(&_txn);
- indexer.commit();
- wunit.commit();
- }
- catch (const DBException& e) {
- if (ErrorCodes::isInterruption(ErrorCodes::Error(e.getCode())))
- return true;
+ uassertStatusOK(indexer.init(key));
+ uassertStatusOK(indexer.insertAllDocumentsInCollection());
+ WriteUnitOfWork wunit(&_txn);
+ indexer.commit();
+ wunit.commit();
+ } catch (const DBException& e) {
+ if (ErrorCodes::isInterruption(ErrorCodes::Error(e.getCode())))
+ return true;
- throw;
- }
- return false;
+ throw;
}
+ return false;
+ }
- OperationContextImpl _txn;
- Client::WriteContext _ctx;
- DBDirectClient _client;
- };
+ OperationContextImpl _txn;
+ Client::WriteContext _ctx;
+ DBDirectClient _client;
+};
- /** addKeysToPhaseOne() adds keys from a collection's documents to an external sorter. */
- // QUERY_MIGRATION
+/** addKeysToPhaseOne() adds keys from a collection's documents to an external sorter. */
+// QUERY_MIGRATION
#if 0
class AddKeysToPhaseOne : public IndexBuildBase {
public:
@@ -207,7 +204,7 @@ namespace IndexUpdateTests {
};
#endif
- // QUERY_MIGRATION
+// QUERY_MIGRATION
#if 0
/** buildBottomUpPhases2And3() builds a btree from the keys in an external sorter. */
class BuildBottomUp : public IndexBuildBase {
@@ -267,7 +264,7 @@ namespace IndexUpdateTests {
};
#endif
- // QUERY_MIGRATION
+// QUERY_MIGRATION
#if 0
/** buildBottomUpPhases2And3() aborts if the current operation is interrupted. */
class InterruptBuildBottomUp : public IndexBuildBase {
@@ -340,296 +337,307 @@ namespace IndexUpdateTests {
bool _mayInterrupt;
};
#endif
- /** Index creation ignores unique constraints when told to. */
- template <bool background>
- class InsertBuildIgnoreUnique : public IndexBuildBase {
- public:
- void run() {
- // Create a new collection.
- Database* db = _ctx.ctx().db();
- Collection* coll;
- {
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection( &_txn, _ns );
- coll = db->createCollection( &_txn, _ns );
-
- coll->insertDocument( &_txn, BSON( "_id" << 1 << "a" << "dup" ), true );
- coll->insertDocument( &_txn, BSON( "_id" << 2 << "a" << "dup" ), true );
- wunit.commit();
- }
-
- MultiIndexBlock indexer(&_txn, coll);
- indexer.allowBackgroundBuilding();
- indexer.allowInterruption();
- indexer.ignoreUniqueConstraint();
-
- const BSONObj spec = BSON("name" << "a"
- << "ns" << coll->ns().ns()
- << "key" << BSON("a" << 1)
- << "unique" << true
- << "background" << background);
-
- ASSERT_OK(indexer.init(spec));
- ASSERT_OK(indexer.insertAllDocumentsInCollection());
-
+/** Index creation ignores unique constraints when told to. */
+template <bool background>
+class InsertBuildIgnoreUnique : public IndexBuildBase {
+public:
+ void run() {
+ // Create a new collection.
+ Database* db = _ctx.ctx().db();
+ Collection* coll;
+ {
WriteUnitOfWork wunit(&_txn);
- indexer.commit();
+ db->dropCollection(&_txn, _ns);
+ coll = db->createCollection(&_txn, _ns);
+
+ coll->insertDocument(&_txn,
+ BSON("_id" << 1 << "a"
+ << "dup"),
+ true);
+ coll->insertDocument(&_txn,
+ BSON("_id" << 2 << "a"
+ << "dup"),
+ true);
wunit.commit();
}
- };
- /** Index creation enforces unique constraints unless told not to. */
- template <bool background>
- class InsertBuildEnforceUnique : public IndexBuildBase {
- public:
- void run() {
- // Create a new collection.
- Database* db = _ctx.ctx().db();
- Collection* coll;
- {
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection( &_txn, _ns );
- coll = db->createCollection( &_txn, _ns );
-
- coll->insertDocument( &_txn, BSON( "_id" << 1 << "a" << "dup" ), true );
- coll->insertDocument( &_txn, BSON( "_id" << 2 << "a" << "dup" ), true );
- wunit.commit();
- }
+ MultiIndexBlock indexer(&_txn, coll);
+ indexer.allowBackgroundBuilding();
+ indexer.allowInterruption();
+ indexer.ignoreUniqueConstraint();
- MultiIndexBlock indexer(&_txn, coll);
- indexer.allowBackgroundBuilding();
- indexer.allowInterruption();
- // indexer.ignoreUniqueConstraint(); // not calling this
+ const BSONObj spec = BSON("name"
+ << "a"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "unique"
+ << true << "background" << background);
- const BSONObj spec = BSON("name" << "a"
- << "ns" << coll->ns().ns()
- << "key" << BSON("a" << 1)
- << "unique" << true
- << "background" << background);
+ ASSERT_OK(indexer.init(spec));
+ ASSERT_OK(indexer.insertAllDocumentsInCollection());
- ASSERT_OK(indexer.init(spec));
- const Status status = indexer.insertAllDocumentsInCollection();
- ASSERT_EQUALS(status.code(), ErrorCodes::DuplicateKey);
+ WriteUnitOfWork wunit(&_txn);
+ indexer.commit();
+ wunit.commit();
+ }
+};
+
+/** Index creation enforces unique constraints unless told not to. */
+template <bool background>
+class InsertBuildEnforceUnique : public IndexBuildBase {
+public:
+ void run() {
+ // Create a new collection.
+ Database* db = _ctx.ctx().db();
+ Collection* coll;
+ {
+ WriteUnitOfWork wunit(&_txn);
+ db->dropCollection(&_txn, _ns);
+ coll = db->createCollection(&_txn, _ns);
+
+ coll->insertDocument(&_txn,
+ BSON("_id" << 1 << "a"
+ << "dup"),
+ true);
+ coll->insertDocument(&_txn,
+ BSON("_id" << 2 << "a"
+ << "dup"),
+ true);
+ wunit.commit();
}
- };
- /** Index creation fills a passed-in set of dups rather than failing. */
- template <bool background>
- class InsertBuildFillDups : public IndexBuildBase {
- public:
- void run() {
- // Create a new collection.
- Database* db = _ctx.ctx().db();
- Collection* coll;
- RecordId loc1;
- RecordId loc2;
- {
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection( &_txn, _ns );
- coll = db->createCollection( &_txn, _ns );
-
- StatusWith<RecordId> swLoc1 = coll->insertDocument(&_txn,
- BSON("_id" << 1 << "a" << "dup"),
- true);
- StatusWith<RecordId> swLoc2 = coll->insertDocument(&_txn,
- BSON("_id" << 2 << "a" << "dup"),
- true);
- ASSERT_OK(swLoc1.getStatus());
- ASSERT_OK(swLoc2.getStatus());
- loc1 = swLoc1.getValue();
- loc2 = swLoc2.getValue();
- wunit.commit();
- }
+ MultiIndexBlock indexer(&_txn, coll);
+ indexer.allowBackgroundBuilding();
+ indexer.allowInterruption();
+ // indexer.ignoreUniqueConstraint(); // not calling this
- MultiIndexBlock indexer(&_txn, coll);
- indexer.allowBackgroundBuilding();
- indexer.allowInterruption();
- // indexer.ignoreUniqueConstraint(); // not calling this
+ const BSONObj spec = BSON("name"
+ << "a"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "unique"
+ << true << "background" << background);
- const BSONObj spec = BSON("name" << "a"
- << "ns" << coll->ns().ns()
- << "key" << BSON("a" << 1)
- << "unique" << true
- << "background" << background);
+ ASSERT_OK(indexer.init(spec));
+ const Status status = indexer.insertAllDocumentsInCollection();
+ ASSERT_EQUALS(status.code(), ErrorCodes::DuplicateKey);
+ }
+};
+
+/** Index creation fills a passed-in set of dups rather than failing. */
+template <bool background>
+class InsertBuildFillDups : public IndexBuildBase {
+public:
+ void run() {
+ // Create a new collection.
+ Database* db = _ctx.ctx().db();
+ Collection* coll;
+ RecordId loc1;
+ RecordId loc2;
+ {
+ WriteUnitOfWork wunit(&_txn);
+ db->dropCollection(&_txn, _ns);
+ coll = db->createCollection(&_txn, _ns);
+
+ StatusWith<RecordId> swLoc1 = coll->insertDocument(&_txn,
+ BSON("_id" << 1 << "a"
+ << "dup"),
+ true);
+ StatusWith<RecordId> swLoc2 = coll->insertDocument(&_txn,
+ BSON("_id" << 2 << "a"
+ << "dup"),
+ true);
+ ASSERT_OK(swLoc1.getStatus());
+ ASSERT_OK(swLoc2.getStatus());
+ loc1 = swLoc1.getValue();
+ loc2 = swLoc2.getValue();
+ wunit.commit();
+ }
- ASSERT_OK(indexer.init(spec));
+ MultiIndexBlock indexer(&_txn, coll);
+ indexer.allowBackgroundBuilding();
+ indexer.allowInterruption();
+ // indexer.ignoreUniqueConstraint(); // not calling this
- std::set<RecordId> dups;
- ASSERT_OK(indexer.insertAllDocumentsInCollection(&dups));
+ const BSONObj spec = BSON("name"
+ << "a"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "unique"
+ << true << "background" << background);
- // either loc1 or loc2 should be in dups but not both.
- ASSERT_EQUALS(dups.size(), 1U);
- ASSERT(dups.count(loc1) || dups.count(loc2));
- }
- };
+ ASSERT_OK(indexer.init(spec));
- /** Index creation is killed if mayInterrupt is true. */
- class InsertBuildIndexInterrupt : public IndexBuildBase {
- public:
- void run() {
- // Create a new collection.
- Database* db = _ctx.ctx().db();
- Collection* coll;
- {
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection( &_txn, _ns );
- coll = db->createCollection( &_txn, _ns );
- // Drop all indexes including id index.
- coll->getIndexCatalog()->dropAllIndexes(&_txn, true );
- // Insert some documents with enforceQuota=true.
- int32_t nDocs = 1000;
- for( int32_t i = 0; i < nDocs; ++i ) {
- coll->insertDocument( &_txn, BSON( "a" << i ), true );
- }
- wunit.commit();
- }
- // Initialize curop.
- _txn.getCurOp()->reset();
- // Request an interrupt.
- getGlobalEnvironment()->setKillAllOperations();
- BSONObj indexInfo = BSON( "key" << BSON( "a" << 1 ) << "ns" << _ns << "name" << "a_1" );
- // The call is interrupted because mayInterrupt == true.
- ASSERT_TRUE(buildIndexInterrupted(indexInfo, true));
- // only want to interrupt the index build
- getGlobalEnvironment()->unsetKillAllOperations();
- // The new index is not listed in the index catalog because the index build failed.
- ASSERT( !coll->getIndexCatalog()->findIndexByName( &_txn, "a_1" ) );
- }
- };
+ std::set<RecordId> dups;
+ ASSERT_OK(indexer.insertAllDocumentsInCollection(&dups));
- /** Index creation is not killed if mayInterrupt is false. */
- class InsertBuildIndexInterruptDisallowed : public IndexBuildBase {
- public:
- void run() {
- // Create a new collection.
- Database* db = _ctx.ctx().db();
- Collection* coll;
- {
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection( &_txn, _ns );
- coll = db->createCollection( &_txn, _ns );
- coll->getIndexCatalog()->dropAllIndexes(&_txn, true );
- // Insert some documents.
- int32_t nDocs = 1000;
- for( int32_t i = 0; i < nDocs; ++i ) {
- coll->insertDocument( &_txn, BSON( "a" << i ), true );
- }
- wunit.commit();
+ // either loc1 or loc2 should be in dups but not both.
+ ASSERT_EQUALS(dups.size(), 1U);
+ ASSERT(dups.count(loc1) || dups.count(loc2));
+ }
+};
+
+/** Index creation is killed if mayInterrupt is true. */
+class InsertBuildIndexInterrupt : public IndexBuildBase {
+public:
+ void run() {
+ // Create a new collection.
+ Database* db = _ctx.ctx().db();
+ Collection* coll;
+ {
+ WriteUnitOfWork wunit(&_txn);
+ db->dropCollection(&_txn, _ns);
+ coll = db->createCollection(&_txn, _ns);
+ // Drop all indexes including id index.
+ coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+ // Insert some documents with enforceQuota=true.
+ int32_t nDocs = 1000;
+ for (int32_t i = 0; i < nDocs; ++i) {
+ coll->insertDocument(&_txn, BSON("a" << i), true);
}
- // Initialize curop.
- _txn.getCurOp()->reset();
- // Request an interrupt.
- getGlobalEnvironment()->setKillAllOperations();
- BSONObj indexInfo = BSON( "key" << BSON( "a" << 1 ) << "ns" << _ns << "name" << "a_1" );
- // The call is not interrupted because mayInterrupt == false.
- ASSERT_FALSE(buildIndexInterrupted(indexInfo, false));
- // only want to interrupt the index build
- getGlobalEnvironment()->unsetKillAllOperations();
- // The new index is listed in the index catalog because the index build completed.
- ASSERT( coll->getIndexCatalog()->findIndexByName( &_txn, "a_1" ) );
+ wunit.commit();
}
- };
-
- /** Index creation is killed when building the _id index. */
- class InsertBuildIdIndexInterrupt : public IndexBuildBase {
- public:
- void run() {
- // Recreate the collection as capped, without an _id index.
- Database* db = _ctx.ctx().db();
- Collection* coll;
- {
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection( &_txn, _ns );
- CollectionOptions options;
- options.capped = true;
- options.cappedSize = 10 * 1024;
- coll = db->createCollection( &_txn, _ns, options );
- coll->getIndexCatalog()->dropAllIndexes(&_txn, true );
- // Insert some documents.
- int32_t nDocs = 1000;
- for( int32_t i = 0; i < nDocs; ++i ) {
- coll->insertDocument( &_txn, BSON( "_id" << i ), true );
- }
- wunit.commit();
+ // Initialize curop.
+ _txn.getCurOp()->reset();
+ // Request an interrupt.
+ getGlobalEnvironment()->setKillAllOperations();
+ BSONObj indexInfo = BSON("key" << BSON("a" << 1) << "ns" << _ns << "name"
+ << "a_1");
+ // The call is interrupted because mayInterrupt == true.
+ ASSERT_TRUE(buildIndexInterrupted(indexInfo, true));
+ // only want to interrupt the index build
+ getGlobalEnvironment()->unsetKillAllOperations();
+ // The new index is not listed in the index catalog because the index build failed.
+ ASSERT(!coll->getIndexCatalog()->findIndexByName(&_txn, "a_1"));
+ }
+};
+
+/** Index creation is not killed if mayInterrupt is false. */
+class InsertBuildIndexInterruptDisallowed : public IndexBuildBase {
+public:
+ void run() {
+ // Create a new collection.
+ Database* db = _ctx.ctx().db();
+ Collection* coll;
+ {
+ WriteUnitOfWork wunit(&_txn);
+ db->dropCollection(&_txn, _ns);
+ coll = db->createCollection(&_txn, _ns);
+ coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+ // Insert some documents.
+ int32_t nDocs = 1000;
+ for (int32_t i = 0; i < nDocs; ++i) {
+ coll->insertDocument(&_txn, BSON("a" << i), true);
}
- // Initialize curop.
- _txn.getCurOp()->reset();
- // Request an interrupt.
- getGlobalEnvironment()->setKillAllOperations();
- BSONObj indexInfo = BSON( "key" << BSON( "_id" << 1 ) <<
- "ns" << _ns <<
- "name" << "_id_" );
- // The call is interrupted because mayInterrupt == true.
- ASSERT_TRUE(buildIndexInterrupted(indexInfo, true));
- // only want to interrupt the index build
- getGlobalEnvironment()->unsetKillAllOperations();
- // The new index is not listed in the index catalog because the index build failed.
- ASSERT( !coll->getIndexCatalog()->findIndexByName( &_txn, "_id_" ) );
+ wunit.commit();
}
- };
-
- /** Index creation is not killed when building the _id index if mayInterrupt is false. */
- class InsertBuildIdIndexInterruptDisallowed : public IndexBuildBase {
- public:
- void run() {
- // Recreate the collection as capped, without an _id index.
- Database* db = _ctx.ctx().db();
- Collection* coll;
- {
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection( &_txn, _ns );
- CollectionOptions options;
- options.capped = true;
- options.cappedSize = 10 * 1024;
- coll = db->createCollection( &_txn, _ns, options );
- coll->getIndexCatalog()->dropAllIndexes(&_txn, true );
- // Insert some documents.
- int32_t nDocs = 1000;
- for( int32_t i = 0; i < nDocs; ++i ) {
- coll->insertDocument( &_txn, BSON( "_id" << i ), true );
- }
- wunit.commit();
+ // Initialize curop.
+ _txn.getCurOp()->reset();
+ // Request an interrupt.
+ getGlobalEnvironment()->setKillAllOperations();
+ BSONObj indexInfo = BSON("key" << BSON("a" << 1) << "ns" << _ns << "name"
+ << "a_1");
+ // The call is not interrupted because mayInterrupt == false.
+ ASSERT_FALSE(buildIndexInterrupted(indexInfo, false));
+ // only want to interrupt the index build
+ getGlobalEnvironment()->unsetKillAllOperations();
+ // The new index is listed in the index catalog because the index build completed.
+ ASSERT(coll->getIndexCatalog()->findIndexByName(&_txn, "a_1"));
+ }
+};
+
+/** Index creation is killed when building the _id index. */
+class InsertBuildIdIndexInterrupt : public IndexBuildBase {
+public:
+ void run() {
+ // Recreate the collection as capped, without an _id index.
+ Database* db = _ctx.ctx().db();
+ Collection* coll;
+ {
+ WriteUnitOfWork wunit(&_txn);
+ db->dropCollection(&_txn, _ns);
+ CollectionOptions options;
+ options.capped = true;
+ options.cappedSize = 10 * 1024;
+ coll = db->createCollection(&_txn, _ns, options);
+ coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+ // Insert some documents.
+ int32_t nDocs = 1000;
+ for (int32_t i = 0; i < nDocs; ++i) {
+ coll->insertDocument(&_txn, BSON("_id" << i), true);
}
- // Initialize curop.
- _txn.getCurOp()->reset();
- // Request an interrupt.
- getGlobalEnvironment()->setKillAllOperations();
- BSONObj indexInfo = BSON( "key" << BSON( "_id" << 1 ) <<
- "ns" << _ns <<
- "name" << "_id_" );
- // The call is not interrupted because mayInterrupt == false.
- ASSERT_FALSE(buildIndexInterrupted(indexInfo, false));
- // only want to interrupt the index build
- getGlobalEnvironment()->unsetKillAllOperations();
- // The new index is listed in the index catalog because the index build succeeded.
- ASSERT( coll->getIndexCatalog()->findIndexByName( &_txn, "_id_" ) );
+ wunit.commit();
}
- };
-
- /** Helpers::ensureIndex() is not interrupted. */
- class HelpersEnsureIndexInterruptDisallowed : public IndexBuildBase {
- public:
- void run() {
+ // Initialize curop.
+ _txn.getCurOp()->reset();
+ // Request an interrupt.
+ getGlobalEnvironment()->setKillAllOperations();
+ BSONObj indexInfo = BSON("key" << BSON("_id" << 1) << "ns" << _ns << "name"
+ << "_id_");
+ // The call is interrupted because mayInterrupt == true.
+ ASSERT_TRUE(buildIndexInterrupted(indexInfo, true));
+ // only want to interrupt the index build
+ getGlobalEnvironment()->unsetKillAllOperations();
+ // The new index is not listed in the index catalog because the index build failed.
+ ASSERT(!coll->getIndexCatalog()->findIndexByName(&_txn, "_id_"));
+ }
+};
+
+/** Index creation is not killed when building the _id index if mayInterrupt is false. */
+class InsertBuildIdIndexInterruptDisallowed : public IndexBuildBase {
+public:
+ void run() {
+ // Recreate the collection as capped, without an _id index.
+ Database* db = _ctx.ctx().db();
+ Collection* coll;
+ {
+ WriteUnitOfWork wunit(&_txn);
+ db->dropCollection(&_txn, _ns);
+ CollectionOptions options;
+ options.capped = true;
+ options.cappedSize = 10 * 1024;
+ coll = db->createCollection(&_txn, _ns, options);
+ coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
// Insert some documents.
int32_t nDocs = 1000;
- for( int32_t i = 0; i < nDocs; ++i ) {
- _client.insert( _ns, BSON( "a" << i ) );
+ for (int32_t i = 0; i < nDocs; ++i) {
+ coll->insertDocument(&_txn, BSON("_id" << i), true);
}
- // Start with just _id
- ASSERT_EQUALS( 1U, _client.getIndexSpecs(_ns).size());
- // Initialize curop.
- _txn.getCurOp()->reset();
- // Request an interrupt.
- getGlobalEnvironment()->setKillAllOperations();
- // The call is not interrupted.
- Helpers::ensureIndex( &_txn, collection(), BSON( "a" << 1 ), false, "a_1" );
- // only want to interrupt the index build
- getGlobalEnvironment()->unsetKillAllOperations();
- // The new index is listed in getIndexSpecs because the index build completed.
- ASSERT_EQUALS( 2U, _client.getIndexSpecs(_ns).size());
+ wunit.commit();
}
- };
- // QUERY_MIGRATION
+ // Initialize curop.
+ _txn.getCurOp()->reset();
+ // Request an interrupt.
+ getGlobalEnvironment()->setKillAllOperations();
+ BSONObj indexInfo = BSON("key" << BSON("_id" << 1) << "ns" << _ns << "name"
+ << "_id_");
+ // The call is not interrupted because mayInterrupt == false.
+ ASSERT_FALSE(buildIndexInterrupted(indexInfo, false));
+ // only want to interrupt the index build
+ getGlobalEnvironment()->unsetKillAllOperations();
+ // The new index is listed in the index catalog because the index build succeeded.
+ ASSERT(coll->getIndexCatalog()->findIndexByName(&_txn, "_id_"));
+ }
+};
+
+/** Helpers::ensureIndex() is not interrupted. */
+class HelpersEnsureIndexInterruptDisallowed : public IndexBuildBase {
+public:
+ void run() {
+ // Insert some documents.
+ int32_t nDocs = 1000;
+ for (int32_t i = 0; i < nDocs; ++i) {
+ _client.insert(_ns, BSON("a" << i));
+ }
+ // Start with just _id
+ ASSERT_EQUALS(1U, _client.getIndexSpecs(_ns).size());
+ // Initialize curop.
+ _txn.getCurOp()->reset();
+ // Request an interrupt.
+ getGlobalEnvironment()->setKillAllOperations();
+ // The call is not interrupted.
+ Helpers::ensureIndex(&_txn, collection(), BSON("a" << 1), false, "a_1");
+ // only want to interrupt the index build
+ getGlobalEnvironment()->unsetKillAllOperations();
+ // The new index is listed in getIndexSpecs because the index build completed.
+ ASSERT_EQUALS(2U, _client.getIndexSpecs(_ns).size());
+ }
+};
+// QUERY_MIGRATION
#if 0
class IndexBuildInProgressTest : public IndexBuildBase {
public:
@@ -688,265 +696,239 @@ namespace IndexUpdateTests {
};
#endif
- Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& indexSpec) {
- MultiIndexBlock indexer(&_txn, collection());
- Status status = indexer.init(indexSpec);
- if (status == ErrorCodes::IndexAlreadyExists) {
- return Status::OK();
- }
- if (!status.isOK()) {
- return status;
- }
- status = indexer.insertAllDocumentsInCollection();
- if (!status.isOK()) {
- return status;
- }
- WriteUnitOfWork wunit(&_txn);
- indexer.commit();
- wunit.commit();
+Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& indexSpec) {
+ MultiIndexBlock indexer(&_txn, collection());
+ Status status = indexer.init(indexSpec);
+ if (status == ErrorCodes::IndexAlreadyExists) {
return Status::OK();
}
+ if (!status.isOK()) {
+ return status;
+ }
+ status = indexer.insertAllDocumentsInCollection();
+ if (!status.isOK()) {
+ return status;
+ }
+ WriteUnitOfWork wunit(&_txn);
+ indexer.commit();
+ wunit.commit();
+ return Status::OK();
+}
- /**
- * Fixture class that has a basic compound index.
- */
- class SimpleCompoundIndex: public IndexBuildBase {
- public:
- SimpleCompoundIndex() {
- ASSERT_OK(
- createIndex(
- "unittest",
- BSON("name" << "x"
- << "ns" << _ns
- << "key" << BSON("x" << 1 << "y" << 1))));
- }
- };
-
- class SameSpecDifferentOption: public SimpleCompoundIndex {
- public:
- void run() {
- // Cannot have same key spec with an option different from the existing one.
- ASSERT_EQUALS(
- ErrorCodes::IndexOptionsConflict,
- createIndex(
- "unittest",
- BSON("name" << "x"
- << "ns" << _ns
- << "unique" << true
- << "key" << BSON("x" << 1 << "y" << 1))));
- }
- };
-
- class SameSpecSameOptions: public SimpleCompoundIndex {
- public:
- void run() {
- ASSERT_OK(
- createIndex(
- "unittest",
- BSON("name" << "x"
- << "ns" << _ns
- << "key" << BSON("x" << 1 << "y" << 1))));
- }
- };
-
- class DifferentSpecSameName: public SimpleCompoundIndex {
- public:
- void run() {
- // Cannot create a different index with the same name as the existing one.
- ASSERT_EQUALS(
- ErrorCodes::IndexKeySpecsConflict,
- createIndex(
- "unittest",
- BSON("name" << "x"
- << "ns" << _ns
- << "key" << BSON("y" << 1 << "x" << 1))));
- }
- };
-
- /**
- * Fixture class for indexes with complex options.
- */
- class ComplexIndex: public IndexBuildBase {
- public:
- ComplexIndex() {
- ASSERT_OK(
- createIndex(
- "unittests",
- BSON("name" << "super"
- << "ns" << _ns
- << "unique" << 1
- << "sparse" << true
- << "expireAfterSeconds" << 3600
- << "key" << BSON("superIdx" << "2d"))));
- }
- };
-
- class SameSpecSameOptionDifferentOrder: public ComplexIndex {
- public:
- void run() {
- // Exactly the same specs with the existing one, only
- // specified in a different order than the original.
- ASSERT_OK(
- createIndex(
- "unittests",
- BSON("name" << "super2"
- << "ns" << _ns
- << "expireAfterSeconds" << 3600
- << "sparse" << true
- << "unique" << 1
- << "key" << BSON("superIdx" << "2d"))));
- }
- };
-
- // The following tests tries to create an index with almost the same
- // specs as the original, except for one option.
-
- class SameSpecDifferentUnique: public ComplexIndex {
- public:
- void run() {
- ASSERT_EQUALS(
- ErrorCodes::IndexOptionsConflict,
- createIndex(
- "unittest",
- BSON("name" << "super2"
- << "ns" << _ns
- << "unique" << false
- << "sparse" << true
- << "expireAfterSeconds" << 3600
- << "key" << BSON("superIdx" << "2d"))));
- }
- };
-
- class SameSpecDifferentSparse: public ComplexIndex {
- public:
- void run() {
- ASSERT_EQUALS(
- ErrorCodes::IndexOptionsConflict,
- createIndex(
- "unittest",
- BSON("name" << "super2"
- << "ns" << _ns
- << "unique" << 1
- << "sparse" << false
- << "background" << true
- << "expireAfterSeconds" << 3600
- << "key" << BSON("superIdx" << "2d"))));
- }
- };
+/**
+ * Fixture class that has a basic compound index.
+ */
+class SimpleCompoundIndex : public IndexBuildBase {
+public:
+ SimpleCompoundIndex() {
+ ASSERT_OK(createIndex("unittest",
+ BSON("name"
+ << "x"
+ << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1))));
+ }
+};
+
+class SameSpecDifferentOption : public SimpleCompoundIndex {
+public:
+ void run() {
+ // Cannot have same key spec with an option different from the existing one.
+ ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
+ createIndex("unittest",
+ BSON("name"
+ << "x"
+ << "ns" << _ns << "unique" << true << "key"
+ << BSON("x" << 1 << "y" << 1))));
+ }
+};
+
+class SameSpecSameOptions : public SimpleCompoundIndex {
+public:
+ void run() {
+ ASSERT_OK(createIndex("unittest",
+ BSON("name"
+ << "x"
+ << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1))));
+ }
+};
+
+class DifferentSpecSameName : public SimpleCompoundIndex {
+public:
+ void run() {
+ // Cannot create a different index with the same name as the existing one.
+ ASSERT_EQUALS(ErrorCodes::IndexKeySpecsConflict,
+ createIndex("unittest",
+ BSON("name"
+ << "x"
+ << "ns" << _ns << "key" << BSON("y" << 1 << "x" << 1))));
+ }
+};
- class SameSpecDifferentTTL: public ComplexIndex {
- public:
- void run() {
- ASSERT_EQUALS(
- ErrorCodes::IndexOptionsConflict,
- createIndex(
- "unittest",
- BSON("name" << "super2"
- << "ns" << _ns
- << "unique" << 1
- << "sparse" << true
- << "expireAfterSeconds" << 2400
- << "key" << BSON("superIdx" << "2d"))));
- }
- };
+/**
+ * Fixture class for indexes with complex options.
+ */
+class ComplexIndex : public IndexBuildBase {
+public:
+ ComplexIndex() {
+ ASSERT_OK(createIndex("unittests",
+ BSON("name"
+ << "super"
+ << "ns" << _ns << "unique" << 1 << "sparse" << true
+ << "expireAfterSeconds" << 3600 << "key" << BSON("superIdx"
+ << "2d"))));
+ }
+};
+
+class SameSpecSameOptionDifferentOrder : public ComplexIndex {
+public:
+ void run() {
+        // Exactly the same spec as the existing one, only
+        // specified in a different order than the original.
+ ASSERT_OK(createIndex("unittests",
+ BSON("name"
+ << "super2"
+ << "ns" << _ns << "expireAfterSeconds" << 3600 << "sparse"
+ << true << "unique" << 1 << "key" << BSON("superIdx"
+ << "2d"))));
+ }
+};
+
+// The following tests try to create an index with almost the same
+// spec as the original, except for one option.
+
+class SameSpecDifferentUnique : public ComplexIndex {
+public:
+ void run() {
+ ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
+ createIndex("unittest",
+ BSON("name"
+ << "super2"
+ << "ns" << _ns << "unique" << false << "sparse" << true
+ << "expireAfterSeconds" << 3600 << "key" << BSON("superIdx"
+ << "2d"))));
+ }
+};
+
+class SameSpecDifferentSparse : public ComplexIndex {
+public:
+ void run() {
+ ASSERT_EQUALS(
+ ErrorCodes::IndexOptionsConflict,
+ createIndex("unittest",
+ BSON("name"
+ << "super2"
+ << "ns" << _ns << "unique" << 1 << "sparse" << false << "background"
+ << true << "expireAfterSeconds" << 3600 << "key" << BSON("superIdx"
+ << "2d"))));
+ }
+};
+
+class SameSpecDifferentTTL : public ComplexIndex {
+public:
+ void run() {
+ ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
+ createIndex("unittest",
+ BSON("name"
+ << "super2"
+ << "ns" << _ns << "unique" << 1 << "sparse" << true
+ << "expireAfterSeconds" << 2400 << "key" << BSON("superIdx"
+ << "2d"))));
+ }
+};
- class StorageEngineOptions : public IndexBuildBase {
- public:
- void run() {
- // "storageEngine" field has to be an object if present.
- ASSERT_NOT_OK(createIndex("unittest", _createSpec(12345)));
+class StorageEngineOptions : public IndexBuildBase {
+public:
+ void run() {
+ // "storageEngine" field has to be an object if present.
+ ASSERT_NOT_OK(createIndex("unittest", _createSpec(12345)));
- // 'storageEngine' must not be empty.
- ASSERT_NOT_OK(createIndex("unittest", _createSpec(BSONObj())));
+ // 'storageEngine' must not be empty.
+ ASSERT_NOT_OK(createIndex("unittest", _createSpec(BSONObj())));
- // Every field under "storageEngine" must match a registered storage engine.
- ASSERT_NOT_OK(createIndex("unittest",
- _createSpec(BSON("unknownEngine" << BSONObj()))));
+ // Every field under "storageEngine" must match a registered storage engine.
+ ASSERT_NOT_OK(createIndex("unittest", _createSpec(BSON("unknownEngine" << BSONObj()))));
- // Testing with 'wiredTiger' because the registered storage engine factory
- // supports custom index options under 'storageEngine'.
- const std::string storageEngineName = "wiredTiger";
+ // Testing with 'wiredTiger' because the registered storage engine factory
+ // supports custom index options under 'storageEngine'.
+ const std::string storageEngineName = "wiredTiger";
- // Run 'wiredTiger' tests if the storage engine is supported.
- if (getGlobalEnvironment()->isRegisteredStorageEngine(storageEngineName)) {
- // Every field under "storageEngine" has to be an object.
- ASSERT_NOT_OK(createIndex("unittest", _createSpec(BSON(storageEngineName << 1))));
+ // Run 'wiredTiger' tests if the storage engine is supported.
+ if (getGlobalEnvironment()->isRegisteredStorageEngine(storageEngineName)) {
+ // Every field under "storageEngine" has to be an object.
+ ASSERT_NOT_OK(createIndex("unittest", _createSpec(BSON(storageEngineName << 1))));
- // Storage engine options must pass validation by the storage engine factory.
- // For 'wiredTiger', embedded document must contain 'configString'.
- ASSERT_NOT_OK(createIndex("unittest", _createSpec(
- BSON(storageEngineName << BSON("unknown" << 1)))));
+ // Storage engine options must pass validation by the storage engine factory.
+ // For 'wiredTiger', embedded document must contain 'configString'.
+ ASSERT_NOT_OK(createIndex(
+ "unittest", _createSpec(BSON(storageEngineName << BSON("unknown" << 1)))));
- // Configuration string for 'wiredTiger' must be a string.
- ASSERT_NOT_OK(createIndex("unittest", _createSpec(
- BSON(storageEngineName << BSON("configString" << 1)))));
+ // Configuration string for 'wiredTiger' must be a string.
+ ASSERT_NOT_OK(createIndex(
+ "unittest", _createSpec(BSON(storageEngineName << BSON("configString" << 1)))));
- // Valid 'wiredTiger' configuration.
- ASSERT_OK(createIndex("unittest", _createSpec(
- BSON(storageEngineName << BSON("configString" << "block_compressor=zlib")))));
- }
- }
- protected:
- template <typename T>
- BSONObj _createSpec(T storageEngineValue) {
- return BSON("name" << "super2"
- << "ns" << _ns
- << "key" << BSON("a" << 1)
- << "storageEngine" << storageEngineValue);
+ // Valid 'wiredTiger' configuration.
+ ASSERT_OK(createIndex(
+ "unittest",
+ _createSpec(BSON(storageEngineName << BSON("configString"
+ << "block_compressor=zlib")))));
}
- };
-
- class IndexCatatalogFixIndexKey {
- public:
- void run() {
- ASSERT_EQUALS( BSON( "x" << 1 ),
- IndexCatalog::fixIndexKey( BSON( "x" << 1 ) ) );
+ }
- ASSERT_EQUALS( BSON( "_id" << 1 ),
- IndexCatalog::fixIndexKey( BSON( "_id" << 1 ) ) );
+protected:
+ template <typename T>
+ BSONObj _createSpec(T storageEngineValue) {
+ return BSON("name"
+ << "super2"
+ << "ns" << _ns << "key" << BSON("a" << 1) << "storageEngine"
+ << storageEngineValue);
+ }
+};
- ASSERT_EQUALS( BSON( "_id" << 1 ),
- IndexCatalog::fixIndexKey( BSON( "_id" << true ) ) );
- }
- };
+class IndexCatatalogFixIndexKey {
+public:
+ void run() {
+ ASSERT_EQUALS(BSON("x" << 1), IndexCatalog::fixIndexKey(BSON("x" << 1)));
- class IndexUpdateTests : public Suite {
- public:
- IndexUpdateTests() :
- Suite( "indexupdate" ) {
- }
+ ASSERT_EQUALS(BSON("_id" << 1), IndexCatalog::fixIndexKey(BSON("_id" << 1)));
- void setupTests() {
- //add<AddKeysToPhaseOne>();
- //add<InterruptAddKeysToPhaseOne>( false );
- //add<InterruptAddKeysToPhaseOne>( true );
- // QUERY_MIGRATION
- //add<BuildBottomUp>();
- //add<InterruptBuildBottomUp>( false );
- //add<InterruptBuildBottomUp>( true );
- add<InsertBuildIgnoreUnique<true> >();
- add<InsertBuildIgnoreUnique<false> >();
- add<InsertBuildEnforceUnique<true> >();
- add<InsertBuildEnforceUnique<false> >();
- add<InsertBuildFillDups<true> >();
- add<InsertBuildFillDups<false> >();
- add<InsertBuildIndexInterrupt>();
- add<InsertBuildIndexInterruptDisallowed>();
- add<InsertBuildIdIndexInterrupt>();
- add<InsertBuildIdIndexInterruptDisallowed>();
- add<HelpersEnsureIndexInterruptDisallowed>();
- //add<IndexBuildInProgressTest>();
- add<SameSpecDifferentOption>();
- add<SameSpecSameOptions>();
- add<DifferentSpecSameName>();
- add<SameSpecSameOptionDifferentOrder>();
- add<SameSpecDifferentUnique>();
- add<SameSpecDifferentSparse>();
- add<SameSpecDifferentTTL>();
- add<StorageEngineOptions>();
-
- add<IndexCatatalogFixIndexKey>();
- }
- } indexUpdateTests;
+ ASSERT_EQUALS(BSON("_id" << 1), IndexCatalog::fixIndexKey(BSON("_id" << true)));
+ }
+};
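+// As the assertions above show, fixIndexKey() leaves ordinary key patterns
+// untouched and normalizes a truthy _id spec such as {_id: true} to the
+// canonical {_id: 1}.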
+
+class IndexUpdateTests : public Suite {
+public:
+ IndexUpdateTests() : Suite("indexupdate") {}
+
+ void setupTests() {
+ // add<AddKeysToPhaseOne>();
+ // add<InterruptAddKeysToPhaseOne>( false );
+ // add<InterruptAddKeysToPhaseOne>( true );
+ // QUERY_MIGRATION
+ // add<BuildBottomUp>();
+ // add<InterruptBuildBottomUp>( false );
+ // add<InterruptBuildBottomUp>( true );
+ add<InsertBuildIgnoreUnique<true>>();
+ add<InsertBuildIgnoreUnique<false>>();
+ add<InsertBuildEnforceUnique<true>>();
+ add<InsertBuildEnforceUnique<false>>();
+ add<InsertBuildFillDups<true>>();
+ add<InsertBuildFillDups<false>>();
+ add<InsertBuildIndexInterrupt>();
+ add<InsertBuildIndexInterruptDisallowed>();
+ add<InsertBuildIdIndexInterrupt>();
+ add<InsertBuildIdIndexInterruptDisallowed>();
+ add<HelpersEnsureIndexInterruptDisallowed>();
+ // add<IndexBuildInProgressTest>();
+ add<SameSpecDifferentOption>();
+ add<SameSpecSameOptions>();
+ add<DifferentSpecSameName>();
+ add<SameSpecSameOptionDifferentOrder>();
+ add<SameSpecDifferentUnique>();
+ add<SameSpecDifferentSparse>();
+ add<SameSpecDifferentTTL>();
+ add<StorageEngineOptions>();
+
+ add<IndexCatatalogFixIndexKey>();
+ }
+} indexUpdateTests;
-} // namespace IndexUpdateTests
+} // namespace IndexUpdateTests
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index 401f4648c80..e1f1a057d77 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -48,2259 +48,2381 @@
namespace mongo {
- using std::cout;
- using std::endl;
- using std::numeric_limits;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- typedef std::map<std::string, BSONElement> BSONMap;
- BSONMap bson2map(const BSONObj& obj) {
- BSONMap m;
- BSONObjIterator it(obj);
- while (it.more()) {
- BSONElement e = it.next();
- m[e.fieldName()] = e;
- }
- return m;
+using std::cout;
+using std::endl;
+using std::numeric_limits;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+typedef std::map<std::string, BSONElement> BSONMap;
+BSONMap bson2map(const BSONObj& obj) {
+ BSONMap m;
+ BSONObjIterator it(obj);
+ while (it.more()) {
+ BSONElement e = it.next();
+ m[e.fieldName()] = e;
}
+ return m;
+}
- void dotted2nested(BSONObjBuilder& b, const BSONObj& obj) {
- //use map to sort fields
- BSONMap sorted = bson2map(obj);
- EmbeddedBuilder eb(&b);
- for(BSONMap::const_iterator it=sorted.begin(); it!=sorted.end(); ++it) {
- eb.appendAs(it->second, it->first);
- }
- eb.done();
+void dotted2nested(BSONObjBuilder& b, const BSONObj& obj) {
+ // use map to sort fields
+ BSONMap sorted = bson2map(obj);
+ EmbeddedBuilder eb(&b);
+ for (BSONMap::const_iterator it = sorted.begin(); it != sorted.end(); ++it) {
+ eb.appendAs(it->second, it->first);
}
+ eb.done();
+}
- // {a.b:1} -> {a: {b:1}}
- BSONObj dotted2nested(const BSONObj& obj) {
- BSONObjBuilder b;
- dotted2nested(b, obj);
- return b.obj();
- }
+// {a.b:1} -> {a: {b:1}}
+BSONObj dotted2nested(const BSONObj& obj) {
+ BSONObjBuilder b;
+ dotted2nested(b, obj);
+ return b.obj();
+}
- // {a: {b:1}} -> {a.b:1}
- void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base="") {
- BSONObjIterator it(obj);
- while (it.more()) {
- BSONElement e = it.next();
- if (e.type() == Object) {
- string newbase = base + e.fieldName() + ".";
- nested2dotted(b, e.embeddedObject(), newbase);
- }
- else {
- string newbase = base + e.fieldName();
- b.appendAs(e, newbase);
- }
+// {a: {b:1}} -> {a.b:1}
+void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base = "") {
+ BSONObjIterator it(obj);
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (e.type() == Object) {
+ string newbase = base + e.fieldName() + ".";
+ nested2dotted(b, e.embeddedObject(), newbase);
+ } else {
+ string newbase = base + e.fieldName();
+ b.appendAs(e, newbase);
}
}
+}
- BSONObj nested2dotted(const BSONObj& obj) {
- BSONObjBuilder b;
- nested2dotted(b, obj);
- return b.obj();
- }
-
- FieldCompareResult compareDottedFieldNames( const string& l , const string& r ,
- const LexNumCmp& cmp ) {
- static int maxLoops = 1024 * 1024;
+BSONObj nested2dotted(const BSONObj& obj) {
+ BSONObjBuilder b;
+ nested2dotted(b, obj);
+ return b.obj();
+}
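+// Illustrative round-trip (a sketch, not part of the suite): for simple
+// objects the two helpers above are inverses of each other.
+//   BSONObj flat = BSON("a.b" << 1);        // {"a.b": 1}
+//   BSONObj nested = dotted2nested(flat);   // {a: {b: 1}}
+//   ASSERT(nested2dotted(nested) == flat);  // back to {"a.b": 1}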
- size_t lstart = 0;
- size_t rstart = 0;
+FieldCompareResult compareDottedFieldNames(const string& l, const string& r, const LexNumCmp& cmp) {
+ static int maxLoops = 1024 * 1024;
- for ( int i=0; i<maxLoops; i++ ) {
+ size_t lstart = 0;
+ size_t rstart = 0;
- size_t a = l.find( '.' , lstart );
- size_t b = r.find( '.' , rstart );
+ for (int i = 0; i < maxLoops; i++) {
+ size_t a = l.find('.', lstart);
+ size_t b = r.find('.', rstart);
- size_t lend = a == string::npos ? l.size() : a;
- size_t rend = b == string::npos ? r.size() : b;
+ size_t lend = a == string::npos ? l.size() : a;
+ size_t rend = b == string::npos ? r.size() : b;
- const string& c = l.substr( lstart , lend - lstart );
- const string& d = r.substr( rstart , rend - rstart );
+ const string& c = l.substr(lstart, lend - lstart);
+ const string& d = r.substr(rstart, rend - rstart);
- int x = cmp.cmp( c.c_str(), d.c_str() );
+ int x = cmp.cmp(c.c_str(), d.c_str());
- if ( x < 0 )
- return LEFT_BEFORE;
- if ( x > 0 )
- return RIGHT_BEFORE;
+ if (x < 0)
+ return LEFT_BEFORE;
+ if (x > 0)
+ return RIGHT_BEFORE;
- lstart = lend + 1;
- rstart = rend + 1;
+ lstart = lend + 1;
+ rstart = rend + 1;
- if ( lstart >= l.size() ) {
- if ( rstart >= r.size() )
- return SAME;
- return RIGHT_SUBFIELD;
- }
- if ( rstart >= r.size() )
- return LEFT_SUBFIELD;
+ if (lstart >= l.size()) {
+ if (rstart >= r.size())
+ return SAME;
+ return RIGHT_SUBFIELD;
}
-
- log() << "compareDottedFieldNames ERROR l: " << l << " r: " << r << " TOO MANY LOOPS" << endl;
- verify(0);
- return SAME; // will never get here
+ if (rstart >= r.size())
+ return LEFT_SUBFIELD;
}
-
+ log() << "compareDottedFieldNames ERROR l: " << l << " r: " << r << " TOO MANY LOOPS" << endl;
+ verify(0);
+ return SAME; // will never get here
+}
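+// Expected outcomes, sketched from the logic above (assuming an ordinary
+// LexNumCmp):
+//   compareDottedFieldNames("a.b", "a.c", cmp)  -> LEFT_BEFORE
+//   compareDottedFieldNames("a.b", "a.b", cmp)  -> SAME
+//   compareDottedFieldNames("a",   "a.b", cmp)  -> RIGHT_SUBFIELD ("a.b" extends "a")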
}
namespace JsobjTests {
- void keyTest(const BSONObj& o, bool mustBeCompact = false) {
- static KeyV1Owned *kLast;
- static BSONObj last;
+void keyTest(const BSONObj& o, bool mustBeCompact = false) {
+ static KeyV1Owned* kLast;
+ static BSONObj last;
- KeyV1Owned *key = new KeyV1Owned(o);
- KeyV1Owned& k = *key;
+ KeyV1Owned* key = new KeyV1Owned(o);
+ KeyV1Owned& k = *key;
- ASSERT( !mustBeCompact || k.isCompactFormat() );
+ ASSERT(!mustBeCompact || k.isCompactFormat());
- BSONObj x = k.toBson();
- int res = o.woCompare(x, BSONObj(), /*considerfieldname*/false);
- if( res ) {
- cout << o.toString() << endl;
- k.toBson();
- cout << x.toString() << endl;
- o.woCompare(x, BSONObj(), /*considerfieldname*/false);
- ASSERT( res == 0 );
- }
- ASSERT( k.woEqual(k) );
- ASSERT( !k.isCompactFormat() || k.dataSize() < o.objsize() );
-
- {
- // check BSONObj::equal. this part not a KeyV1 test.
- int res = o.woCompare(last);
- ASSERT( (res==0) == o.equal(last) );
- }
-
- if( kLast ) {
- int r1 = o.woCompare(last, BSONObj(), false);
- int r2 = k.woCompare(*kLast, Ordering::make(BSONObj()));
- bool ok = (r1<0 && r2<0) || (r1>0&&r2>0) || r1==r2;
- if( !ok ) {
- cout << "r1r2 " << r1 << ' ' << r2 << endl;
- cout << "o:" << o.toString() << endl;
- cout << "last:" << last.toString() << endl;
- cout << "k:" << k.toString() << endl;
- cout << "kLast:" << kLast->toString() << endl;
- int r3 = k.woCompare(*kLast, Ordering::make(BSONObj()));
- cout << r3 << endl;
- }
- ASSERT(ok);
- if( k.isCompactFormat() && kLast->isCompactFormat() ) { // only check if not bson as bson woEqual is broken! (or was may2011)
- if( k.woEqual(*kLast) != (r2 == 0) ) { // check woEqual matches
- cout << r2 << endl;
- cout << k.toString() << endl;
- cout << kLast->toString() << endl;
- k.woEqual(*kLast);
- ASSERT(false);
- }
- }
- }
+ BSONObj x = k.toBson();
+ int res = o.woCompare(x, BSONObj(), /*considerfieldname*/ false);
+ if (res) {
+ cout << o.toString() << endl;
+ k.toBson();
+ cout << x.toString() << endl;
+ o.woCompare(x, BSONObj(), /*considerfieldname*/ false);
+ ASSERT(res == 0);
+ }
+ ASSERT(k.woEqual(k));
+ ASSERT(!k.isCompactFormat() || k.dataSize() < o.objsize());
- delete kLast;
- kLast = key;
- last = o.getOwned();
+ {
+        // Check BSONObj::equal. This part is not a KeyV1 test.
+ int res = o.woCompare(last);
+ ASSERT((res == 0) == o.equal(last));
}
- class BufBuilderBasic {
- public:
- void run() {
- {
- BufBuilder b( 0 );
- b.appendStr( "foo" );
- ASSERT_EQUALS( 4, b.len() );
- ASSERT( strcmp( "foo", b.buf() ) == 0 );
- }
- {
- mongo::StackBufBuilder b;
- b.appendStr( "foo" );
- ASSERT_EQUALS( 4, b.len() );
- ASSERT( strcmp( "foo", b.buf() ) == 0 );
- }
+ if (kLast) {
+ int r1 = o.woCompare(last, BSONObj(), false);
+ int r2 = k.woCompare(*kLast, Ordering::make(BSONObj()));
+ bool ok = (r1 < 0 && r2 < 0) || (r1 > 0 && r2 > 0) || r1 == r2;
+ if (!ok) {
+ cout << "r1r2 " << r1 << ' ' << r2 << endl;
+ cout << "o:" << o.toString() << endl;
+ cout << "last:" << last.toString() << endl;
+ cout << "k:" << k.toString() << endl;
+ cout << "kLast:" << kLast->toString() << endl;
+ int r3 = k.woCompare(*kLast, Ordering::make(BSONObj()));
+ cout << r3 << endl;
}
- };
-
- class BufBuilderReallocLimit {
- public:
- void run() {
- BufBuilder b;
- unsigned int written = 0;
- try {
- for (; written <= 64 * 1024 * 1024 + 1; ++written)
- // (re)alloc past the buffer 64mb limit
- b.appendStr("a");
+ ASSERT(ok);
+        // Only check when both keys are compact: plain-BSON woEqual was broken
+        // (or was as of May 2011), so the cross-check is skipped otherwise.
+        if (k.isCompactFormat() && kLast->isCompactFormat()) {
+ if (k.woEqual(*kLast) != (r2 == 0)) { // check woEqual matches
+ cout << r2 << endl;
+ cout << k.toString() << endl;
+ cout << kLast->toString() << endl;
+ k.woEqual(*kLast);
+ ASSERT(false);
}
- catch (const AssertionException&) { }
- // assert half of max buffer size was allocated before exception is thrown
- ASSERT(written == mongo::BufferMaxSize / 2);
}
- };
+ }
- class BSONElementBasic {
- public:
- void run() {
- ASSERT_EQUALS( 1, BSONElement().size() );
+ delete kLast;
+ kLast = key;
+ last = o.getOwned();
+}
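+// keyTest() round-trips 'o' through KeyV1Owned, checks toBson() fidelity and
+// the compact-format/woEqual invariants, and cross-checks the KeyV1 ordering
+// against BSONObj::woCompare using the previous key held in the function-local
+// statics kLast/last.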
- BSONObj x;
- ASSERT_EQUALS( 1, x.firstElement().size() );
+class BufBuilderBasic {
+public:
+ void run() {
+ {
+ BufBuilder b(0);
+ b.appendStr("foo");
+ ASSERT_EQUALS(4, b.len());
+ ASSERT(strcmp("foo", b.buf()) == 0);
}
- };
-
- namespace BSONObjTests {
- class Create {
- public:
- void run() {
- BSONObj b;
- ASSERT_EQUALS( 0, b.nFields() );
- }
- };
-
- class Base {
- protected:
- static BSONObj basic( const char *name, int val ) {
- BSONObjBuilder b;
- b.append( name, val );
- return b.obj();
- }
- static BSONObj basic( const char *name, vector< int > val ) {
- BSONObjBuilder b;
- b.append( name, val );
- return b.obj();
- }
- template< class T >
- static BSONObj basic( const char *name, T val ) {
- BSONObjBuilder b;
- b.append( name, val );
- return b.obj();
- }
- };
-
- class WoCompareBasic : public Base {
- public:
- void run() {
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ) ) == 0 );
- ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ) ) > 0 );
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ) ) < 0 );
- // field name comparison
- ASSERT( basic( "a", 1 ).woCompare( basic( "b", 1 ) ) < 0 );
- }
- };
-
- class IsPrefixOf : public Base {
- public:
- void run() {
- {
- BSONObj k = BSON( "x" << 1 );
- verify( ! k.isPrefixOf( BSON( "a" << 1 ) ) );
- verify( k.isPrefixOf( BSON( "x" << 1 ) ) );
- verify( k.isPrefixOf( BSON( "x" << 1 << "a" << 1 ) ) );
- verify( ! k.isPrefixOf( BSON( "a" << 1 << "x" << 1 ) ) );
- }
- {
- BSONObj k = BSON( "x" << 1 << "y" << 1 );
- verify( ! k.isPrefixOf( BSON( "x" << 1 ) ) );
- verify( ! k.isPrefixOf( BSON( "x" << 1 << "z" << 1 ) ) );
- verify( k.isPrefixOf( BSON( "x" << 1 << "y" << 1 ) ) );
- verify( k.isPrefixOf( BSON( "x" << 1 << "y" << 1 << "z" << 1 ) ) );
- }
- {
- BSONObj k = BSON( "x" << 1 );
- verify( ! k.isPrefixOf( BSON( "x" << "hi" ) ) );
- verify( k.isPrefixOf( BSON( "x" << 1 << "a" << "hi" ) ) );
- }
- {
- BSONObj k = BSON( "x" << 1 );
- verify( k.isFieldNamePrefixOf( BSON( "x" << "hi" ) ) );
- verify( ! k.isFieldNamePrefixOf( BSON( "a" << 1 ) ) );
- }
- }
- };
-
- class NumericCompareBasic : public Base {
- public:
- void run() {
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1.0 ) ) == 0 );
- }
- };
-
- class WoCompareEmbeddedObject : public Base {
- public:
- void run() {
- ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
- ( basic( "a", basic( "b", 1.0 ) ) ) == 0 );
- ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
- ( basic( "a", basic( "b", 2 ) ) ) < 0 );
- }
- };
-
- class WoCompareEmbeddedArray : public Base {
- public:
- void run() {
- vector< int > i;
- i.push_back( 1 );
- i.push_back( 2 );
- vector< double > d;
- d.push_back( 1 );
- d.push_back( 2 );
- ASSERT( basic( "a", i ).woCompare( basic( "a", d ) ) == 0 );
-
- vector< int > j;
- j.push_back( 1 );
- j.push_back( 3 );
- ASSERT( basic( "a", i ).woCompare( basic( "a", j ) ) < 0 );
- }
- };
-
- class WoCompareOrdered : public Base {
- public:
- void run() {
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) == 0 );
- ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) > 0 );
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", 1 ) ) < 0 );
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) == 0 );
- ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) < 0 );
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", -1 ) ) > 0 );
- }
- };
+ {
+ mongo::StackBufBuilder b;
+ b.appendStr("foo");
+ ASSERT_EQUALS(4, b.len());
+ ASSERT(strcmp("foo", b.buf()) == 0);
+ }
+ }
+};
+
+class BufBuilderReallocLimit {
+public:
+ void run() {
+ BufBuilder b;
+ unsigned int written = 0;
+ try {
+ for (; written <= 64 * 1024 * 1024 + 1; ++written)
+ // (re)alloc past the buffer 64mb limit
+ b.appendStr("a");
+ } catch (const AssertionException&) {
+ }
+ // assert half of max buffer size was allocated before exception is thrown
+ ASSERT(written == mongo::BufferMaxSize / 2);
+ }
+};
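+// Why exactly half: each appendStr("a") writes two bytes ("a" plus its
+// terminating NUL), so the 64MB BufBuilder limit is hit after
+// BufferMaxSize / 2 successful appends.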
- class WoCompareDifferentLength : public Base {
- public:
- void run() {
- ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << 1 << "b" << 1 ) ) < 0 );
- ASSERT( BSON( "a" << 1 << "b" << 1 ).woCompare( BSON( "a" << 1 ) ) > 0 );
- }
- };
+class BSONElementBasic {
+public:
+ void run() {
+ ASSERT_EQUALS(1, BSONElement().size());
- class WoSortOrder : public Base {
- public:
- void run() {
- ASSERT( BSON( "a" << 1 ).woSortOrder( BSON( "a" << 2 ), BSON( "b" << 1 << "a" << 1 ) ) < 0 );
- ASSERT( fromjson( "{a:null}" ).woSortOrder( BSON( "b" << 1 ), BSON( "a" << 1 ) ) == 0 );
- }
- };
-
- class MultiKeySortOrder : public Base {
- public:
- void run() {
- ASSERT( BSON( "x" << "a" ).woCompare( BSON( "x" << "b" ) ) < 0 );
- ASSERT( BSON( "x" << "b" ).woCompare( BSON( "x" << "a" ) ) > 0 );
+ BSONObj x;
+ ASSERT_EQUALS(1, x.firstElement().size());
+ }
+};
+
+namespace BSONObjTests {
+class Create {
+public:
+ void run() {
+ BSONObj b;
+ ASSERT_EQUALS(0, b.nFields());
+ }
+};
- ASSERT( BSON( "x" << "a" << "y" << "a" ).woCompare( BSON( "x" << "a" << "y" << "b" ) ) < 0 );
- ASSERT( BSON( "x" << "a" << "y" << "a" ).woCompare( BSON( "x" << "b" << "y" << "a" ) ) < 0 );
- ASSERT( BSON( "x" << "a" << "y" << "a" ).woCompare( BSON( "x" << "b" ) ) < 0 );
+class Base {
+protected:
+ static BSONObj basic(const char* name, int val) {
+ BSONObjBuilder b;
+ b.append(name, val);
+ return b.obj();
+ }
+ static BSONObj basic(const char* name, vector<int> val) {
+ BSONObjBuilder b;
+ b.append(name, val);
+ return b.obj();
+ }
+ template <class T>
+ static BSONObj basic(const char* name, T val) {
+ BSONObjBuilder b;
+ b.append(name, val);
+ return b.obj();
+ }
+};
+
+class WoCompareBasic : public Base {
+public:
+ void run() {
+ ASSERT(basic("a", 1).woCompare(basic("a", 1)) == 0);
+ ASSERT(basic("a", 2).woCompare(basic("a", 1)) > 0);
+ ASSERT(basic("a", 1).woCompare(basic("a", 2)) < 0);
+ // field name comparison
+ ASSERT(basic("a", 1).woCompare(basic("b", 1)) < 0);
+ }
+};
- ASSERT( BSON( "x" << "c" ).woCompare( BSON( "x" << "b" << "y" << "h" ) ) > 0 );
- ASSERT( BSON( "x" << "b" << "y" << "b" ).woCompare( BSON( "x" << "c" ) ) < 0 );
+class IsPrefixOf : public Base {
+public:
+ void run() {
+ {
+ BSONObj k = BSON("x" << 1);
+ verify(!k.isPrefixOf(BSON("a" << 1)));
+ verify(k.isPrefixOf(BSON("x" << 1)));
+ verify(k.isPrefixOf(BSON("x" << 1 << "a" << 1)));
+ verify(!k.isPrefixOf(BSON("a" << 1 << "x" << 1)));
+ }
+ {
+ BSONObj k = BSON("x" << 1 << "y" << 1);
+ verify(!k.isPrefixOf(BSON("x" << 1)));
+ verify(!k.isPrefixOf(BSON("x" << 1 << "z" << 1)));
+ verify(k.isPrefixOf(BSON("x" << 1 << "y" << 1)));
+ verify(k.isPrefixOf(BSON("x" << 1 << "y" << 1 << "z" << 1)));
+ }
+ {
+ BSONObj k = BSON("x" << 1);
+ verify(!k.isPrefixOf(BSON("x"
+ << "hi")));
+ verify(k.isPrefixOf(BSON("x" << 1 << "a"
+ << "hi")));
+ }
+ {
+ BSONObj k = BSON("x" << 1);
+ verify(k.isFieldNamePrefixOf(BSON("x"
+ << "hi")));
+ verify(!k.isFieldNamePrefixOf(BSON("a" << 1)));
+ }
+ }
+};
- BSONObj key = BSON( "x" << 1 << "y" << 1 );
+class NumericCompareBasic : public Base {
+public:
+ void run() {
+ ASSERT(basic("a", 1).woCompare(basic("a", 1.0)) == 0);
+ }
+};
- ASSERT( BSON( "x" << "c" ).woSortOrder( BSON( "x" << "b" << "y" << "h" ) , key ) > 0 );
- ASSERT( BSON( "x" << "b" << "y" << "b" ).woCompare( BSON( "x" << "c" ) , key ) < 0 );
+class WoCompareEmbeddedObject : public Base {
+public:
+ void run() {
+ ASSERT(basic("a", basic("b", 1)).woCompare(basic("a", basic("b", 1.0))) == 0);
+ ASSERT(basic("a", basic("b", 1)).woCompare(basic("a", basic("b", 2))) < 0);
+ }
+};
+
+class WoCompareEmbeddedArray : public Base {
+public:
+ void run() {
+ vector<int> i;
+ i.push_back(1);
+ i.push_back(2);
+ vector<double> d;
+ d.push_back(1);
+ d.push_back(2);
+ ASSERT(basic("a", i).woCompare(basic("a", d)) == 0);
+
+ vector<int> j;
+ j.push_back(1);
+ j.push_back(3);
+ ASSERT(basic("a", i).woCompare(basic("a", j)) < 0);
+ }
+};
+
+class WoCompareOrdered : public Base {
+public:
+ void run() {
+ ASSERT(basic("a", 1).woCompare(basic("a", 1), basic("a", 1)) == 0);
+ ASSERT(basic("a", 2).woCompare(basic("a", 1), basic("a", 1)) > 0);
+ ASSERT(basic("a", 1).woCompare(basic("a", 2), basic("a", 1)) < 0);
+ ASSERT(basic("a", 1).woCompare(basic("a", 1), basic("a", -1)) == 0);
+ ASSERT(basic("a", 2).woCompare(basic("a", 1), basic("a", -1)) < 0);
+ ASSERT(basic("a", 1).woCompare(basic("a", 2), basic("a", -1)) > 0);
+ }
+};
- key = BSON( "" << 1 << "" << 1 );
+class WoCompareDifferentLength : public Base {
+public:
+ void run() {
+ ASSERT(BSON("a" << 1).woCompare(BSON("a" << 1 << "b" << 1)) < 0);
+ ASSERT(BSON("a" << 1 << "b" << 1).woCompare(BSON("a" << 1)) > 0);
+ }
+};
- ASSERT( BSON( "" << "c" ).woSortOrder( BSON( "" << "b" << "" << "h" ) , key ) > 0 );
- ASSERT( BSON( "" << "b" << "" << "b" ).woCompare( BSON( "" << "c" ) , key ) < 0 );
+class WoSortOrder : public Base {
+public:
+ void run() {
+ ASSERT(BSON("a" << 1).woSortOrder(BSON("a" << 2), BSON("b" << 1 << "a" << 1)) < 0);
+ ASSERT(fromjson("{a:null}").woSortOrder(BSON("b" << 1), BSON("a" << 1)) == 0);
+ }
+};
+
+class MultiKeySortOrder : public Base {
+public:
+ void run() {
+ ASSERT(BSON("x"
+ << "a")
+ .woCompare(BSON("x"
+ << "b")) < 0);
+ ASSERT(BSON("x"
+ << "b")
+ .woCompare(BSON("x"
+ << "a")) > 0);
+
+ ASSERT(BSON("x"
+ << "a"
+ << "y"
+ << "a")
+ .woCompare(BSON("x"
+ << "a"
+ << "y"
+ << "b")) < 0);
+ ASSERT(BSON("x"
+ << "a"
+ << "y"
+ << "a")
+ .woCompare(BSON("x"
+ << "b"
+ << "y"
+ << "a")) < 0);
+ ASSERT(BSON("x"
+ << "a"
+ << "y"
+ << "a")
+ .woCompare(BSON("x"
+ << "b")) < 0);
+
+ ASSERT(BSON("x"
+ << "c")
+ .woCompare(BSON("x"
+ << "b"
+ << "y"
+ << "h")) > 0);
+ ASSERT(BSON("x"
+ << "b"
+ << "y"
+ << "b")
+ .woCompare(BSON("x"
+ << "c")) < 0);
+
+ BSONObj key = BSON("x" << 1 << "y" << 1);
+
+ ASSERT(BSON("x"
+ << "c")
+ .woSortOrder(BSON("x"
+ << "b"
+ << "y"
+ << "h"),
+ key) > 0);
+ ASSERT(BSON("x"
+ << "b"
+ << "y"
+ << "b")
+ .woCompare(BSON("x"
+ << "c"),
+ key) < 0);
+
+ key = BSON("" << 1 << "" << 1);
+
+ ASSERT(BSON(""
+ << "c")
+ .woSortOrder(BSON(""
+ << "b"
+ << ""
+ << "h"),
+ key) > 0);
+ ASSERT(BSON(""
+ << "b"
+ << ""
+ << "b")
+ .woCompare(BSON(""
+ << "c"),
+ key) < 0);
- {
- // test a big key
- string x(2000, 'z');
- BSONObj o = BSON( "q" << x );
- keyTest(o, false);
- }
- {
- string y(200, 'w');
- BSONObjBuilder b;
- for( int i = 0; i < 10; i++ ) {
- b.append("x", y);
- }
- keyTest(b.obj(), true);
- }
- {
- double nan = numeric_limits<double>::quiet_NaN();
- BSONObj o = BSON( "y" << nan );
- keyTest(o);
- }
+ {
+ // test a big key
+ string x(2000, 'z');
+ BSONObj o = BSON("q" << x);
+ keyTest(o, false);
+ }
+ {
+ string y(200, 'w');
+ BSONObjBuilder b;
+ for (int i = 0; i < 10; i++) {
+ b.append("x", y);
+ }
+ keyTest(b.obj(), true);
+ }
+ {
+ double nan = numeric_limits<double>::quiet_NaN();
+ BSONObj o = BSON("y" << nan);
+ keyTest(o);
+ }
- {
- BSONObjBuilder b;
- b.append( "" , "c" );
- b.appendNull( "" );
- BSONObj o = b.obj();
- keyTest(o);
- ASSERT( o.woSortOrder( BSON( "" << "b" << "" << "h" ) , key ) > 0 );
- ASSERT( BSON( "" << "b" << "" << "h" ).woSortOrder( o , key ) < 0 );
+ {
+ BSONObjBuilder b;
+ b.append("", "c");
+ b.appendNull("");
+ BSONObj o = b.obj();
+ keyTest(o);
+ ASSERT(o.woSortOrder(BSON(""
+ << "b"
+ << ""
+ << "h"),
+ key) > 0);
+ ASSERT(BSON(""
+ << "b"
+ << ""
+ << "h").woSortOrder(o, key) < 0);
+ }
- }
+ ASSERT(BSON(""
+ << "a")
+ .woCompare(BSON(""
+ << "a"
+ << ""
+ << "c")) < 0);
+ {
+ BSONObjBuilder b;
+ b.append("", "a");
+ b.appendNull("");
+ ASSERT(b.obj().woCompare(BSON(""
+ << "a"
+ << ""
+ << "c")) < 0); // SERVER-282
+ }
+ }
+};
- ASSERT( BSON( "" << "a" ).woCompare( BSON( "" << "a" << "" << "c" ) ) < 0 );
- {
- BSONObjBuilder b;
- b.append( "" , "a" );
- b.appendNull( "" );
- ASSERT( b.obj().woCompare( BSON( "" << "a" << "" << "c" ) ) < 0 ); // SERVER-282
- }
+class Nan : public Base {
+public:
+ void run() {
+ double inf = numeric_limits<double>::infinity();
+ double nan = numeric_limits<double>::quiet_NaN();
+ double nan2 = numeric_limits<double>::signaling_NaN();
+ ASSERT(isNaN(nan));
+ ASSERT(isNaN(nan2));
+ ASSERT(!isNaN(inf));
- }
- };
+ ASSERT(BSON("a" << inf).woCompare(BSON("a" << inf)) == 0);
+ ASSERT(BSON("a" << inf).woCompare(BSON("a" << 1)) > 0);
+ ASSERT(BSON("a" << 1).woCompare(BSON("a" << inf)) < 0);
- class Nan : public Base {
- public:
- void run() {
- double inf = numeric_limits< double >::infinity();
- double nan = numeric_limits< double >::quiet_NaN();
- double nan2 = numeric_limits< double >::signaling_NaN();
- ASSERT( isNaN(nan) );
- ASSERT( isNaN(nan2) );
- ASSERT( !isNaN(inf) );
+ ASSERT(BSON("a" << nan).woCompare(BSON("a" << nan)) == 0);
+ ASSERT(BSON("a" << nan).woCompare(BSON("a" << 1)) < 0);
- ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << inf ) ) == 0 );
- ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << 1 ) ) > 0 );
- ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << inf ) ) < 0 );
+ ASSERT(BSON("a" << nan).woCompare(BSON("a" << 5000000000LL)) < 0);
- ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << nan ) ) == 0 );
- ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << 1 ) ) < 0 );
+ {
+ KeyV1Owned a(BSON("a" << nan));
+ KeyV1Owned b(BSON("a" << 1));
+ Ordering o = Ordering::make(BSON("a" << 1));
+ ASSERT(a.woCompare(b, o) < 0);
+ }
- ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << 5000000000LL ) ) < 0 );
+ ASSERT(BSON("a" << 1).woCompare(BSON("a" << nan)) > 0);
- {
- KeyV1Owned a( BSON( "a" << nan ) );
- KeyV1Owned b( BSON( "a" << 1 ) );
- Ordering o = Ordering::make(BSON("a"<<1));
- ASSERT( a.woCompare(b, o) < 0 );
- }
+ ASSERT(BSON("a" << nan2).woCompare(BSON("a" << nan2)) == 0);
+ ASSERT(BSON("a" << nan2).woCompare(BSON("a" << 1)) < 0);
+ ASSERT(BSON("a" << 1).woCompare(BSON("a" << nan2)) > 0);
- ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << nan ) ) > 0 );
+ ASSERT(BSON("a" << inf).woCompare(BSON("a" << nan)) > 0);
+ ASSERT(BSON("a" << inf).woCompare(BSON("a" << nan2)) > 0);
+ ASSERT(BSON("a" << nan).woCompare(BSON("a" << nan2)) == 0);
+ }
+};
- ASSERT( BSON( "a" << nan2 ).woCompare( BSON( "a" << nan2 ) ) == 0 );
- ASSERT( BSON( "a" << nan2 ).woCompare( BSON( "a" << 1 ) ) < 0 );
- ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << nan2 ) ) > 0 );
+class AsTempObj {
+public:
+ void run() {
+ {
+ BSONObjBuilder bb;
+ bb << "a" << 1;
+ BSONObj tmp = bb.asTempObj();
+ ASSERT(tmp.objsize() == 4 + (1 + 2 + 4) + 1);
+ ASSERT(tmp.valid());
+ ASSERT(tmp.hasField("a"));
+ ASSERT(!tmp.hasField("b"));
+ ASSERT(tmp == BSON("a" << 1));
+
+ bb << "b" << 2;
+ BSONObj obj = bb.obj();
+ ASSERT_EQUALS(obj.objsize(), 4 + (1 + 2 + 4) + (1 + 2 + 4) + 1);
+ ASSERT(obj.valid());
+ ASSERT(obj.hasField("a"));
+ ASSERT(obj.hasField("b"));
+ ASSERT(obj == BSON("a" << 1 << "b" << 2));
+ }
+ {
+ BSONObjBuilder bb;
+ bb << "a" << GT << 1;
+ BSONObj tmp = bb.asTempObj();
+ ASSERT(tmp.objsize() == 4 + (1 + 2 + (4 + 1 + 4 + 4 + 1)) + 1);
+ ASSERT(tmp.valid());
+ ASSERT(tmp.hasField("a"));
+ ASSERT(!tmp.hasField("b"));
+ ASSERT(tmp == BSON("a" << BSON("$gt" << 1)));
+
+ bb << "b" << LT << 2;
+ BSONObj obj = bb.obj();
+ ASSERT(obj.objsize() ==
+ 4 + (1 + 2 + (4 + 1 + 4 + 4 + 1)) + (1 + 2 + (4 + 1 + 4 + 4 + 1)) + 1);
+ ASSERT(obj.valid());
+ ASSERT(obj.hasField("a"));
+ ASSERT(obj.hasField("b"));
+ ASSERT(obj == BSON("a" << BSON("$gt" << 1) << "b" << BSON("$lt" << 2)));
+ }
+ {
+ BSONObjBuilder bb(32);
+ bb << "a" << 1;
+ BSONObj tmp = bb.asTempObj();
+ ASSERT(tmp.objsize() == 4 + (1 + 2 + 4) + 1);
+ ASSERT(tmp.valid());
+ ASSERT(tmp.hasField("a"));
+ ASSERT(!tmp.hasField("b"));
+ ASSERT(tmp == BSON("a" << 1));
+
+ // force a realloc
+ BSONArrayBuilder arr;
+ for (int i = 0; i < 10000; i++) {
+ arr << i;
+ }
+ bb << "b" << arr.arr();
+ BSONObj obj = bb.obj();
+ ASSERT(obj.valid());
+ ASSERT(obj.hasField("a"));
+ ASSERT(obj.hasField("b"));
+ }
+ }
+};
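+// The size arithmetic above follows the raw BSON layout: a 4-byte document
+// length, then per element one type byte + the NUL-terminated field name
+// ("a" -> 2 bytes) + the value (4 bytes for an int32), and a trailing EOO
+// byte.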
- ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan ) ) > 0 );
- ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan2 ) ) > 0 );
- ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << nan2 ) ) == 0 );
- }
- };
+struct AppendIntOrLL {
+ void run() {
+ const long long billion = 1000 * 1000 * 1000;
- class AsTempObj {
- public:
- void run() {
- {
- BSONObjBuilder bb;
- bb << "a" << 1;
- BSONObj tmp = bb.asTempObj();
- ASSERT(tmp.objsize() == 4+(1+2+4)+1);
- ASSERT(tmp.valid());
- ASSERT(tmp.hasField("a"));
- ASSERT(!tmp.hasField("b"));
- ASSERT(tmp == BSON("a" << 1));
-
- bb << "b" << 2;
- BSONObj obj = bb.obj();
- ASSERT_EQUALS(obj.objsize() , 4+(1+2+4)+(1+2+4)+1);
- ASSERT(obj.valid());
- ASSERT(obj.hasField("a"));
- ASSERT(obj.hasField("b"));
- ASSERT(obj == BSON("a" << 1 << "b" << 2));
- }
- {
- BSONObjBuilder bb;
- bb << "a" << GT << 1;
- BSONObj tmp = bb.asTempObj();
- ASSERT(tmp.objsize() == 4+(1+2+(4+1+4+4+1))+1);
- ASSERT(tmp.valid());
- ASSERT(tmp.hasField("a"));
- ASSERT(!tmp.hasField("b"));
- ASSERT(tmp == BSON("a" << BSON("$gt" << 1)));
-
- bb << "b" << LT << 2;
- BSONObj obj = bb.obj();
- ASSERT(obj.objsize() == 4+(1+2+(4+1+4+4+1))+(1+2+(4+1+4+4+1))+1);
- ASSERT(obj.valid());
- ASSERT(obj.hasField("a"));
- ASSERT(obj.hasField("b"));
- ASSERT(obj == BSON("a" << BSON("$gt" << 1)
- << "b" << BSON("$lt" << 2)));
- }
- {
- BSONObjBuilder bb(32);
- bb << "a" << 1;
- BSONObj tmp = bb.asTempObj();
- ASSERT(tmp.objsize() == 4+(1+2+4)+1);
- ASSERT(tmp.valid());
- ASSERT(tmp.hasField("a"));
- ASSERT(!tmp.hasField("b"));
- ASSERT(tmp == BSON("a" << 1));
-
- //force a realloc
- BSONArrayBuilder arr;
- for (int i=0; i < 10000; i++) {
- arr << i;
- }
- bb << "b" << arr.arr();
- BSONObj obj = bb.obj();
- ASSERT(obj.valid());
- ASSERT(obj.hasField("a"));
- ASSERT(obj.hasField("b"));
- }
- }
- };
+ long long n = 0x3333111122224444LL;
+ {
+ double d = (double)n;
+ BSONObj a = BSON("x" << n);
+ BSONObj b = BSON("x" << d);
- struct AppendIntOrLL {
- void run() {
- const long long billion = 1000*1000*1000;
+ long long back = (long long)d;
+ // 3719
+ ////// int res = a.woCompare(b);
- long long n = 0x3333111122224444LL;
- {
- double d = (double) n;
- BSONObj a = BSON( "x" << n );
- BSONObj b = BSON( "x" << d );
+ ASSERT(n > back);
+ // ASSERT( res > 0 ); // SERVER-3719
- long long back = (long long) d;
-//3719
-////// int res = a.woCompare(b);
+ keyTest(a, false);
- ASSERT( n > back );
- //ASSERT( res > 0 ); // SERVER-3719
+ KeyV1Owned A(a);
+ KeyV1Owned B(b);
+ // 3719
+ ////// int res2 = A.woCompare(B, Ordering::make(BSONObj()));
+ // ASSERT( res2 > 0 ); // SERVER-3719
- keyTest(a, false);
+ // fixing requires an index v# change.
- KeyV1Owned A(a);
- KeyV1Owned B(b);
-//3719
-////// int res2 = A.woCompare(B, Ordering::make(BSONObj()));
- // ASSERT( res2 > 0 ); // SERVER-3719
+ cout << "todo fix SERVER-3719 and uncomment test in AppendIntOrLL" << endl;
- // fixing requires an index v# change.
+ n++;
+ }
- cout << "todo fix SERVER-3719 and uncomment test in AppendIntOrLL" << endl;
+ {
+ BSONObjBuilder b;
+ b.appendIntOrLL("L4", -4 * billion);
+ keyTest(b.obj());
+ keyTest(BSON("" << billion));
+ }
- n++;
- }
+ BSONObjBuilder b;
+ b.appendIntOrLL("i1", 1);
+ b.appendIntOrLL("i2", -1);
+ b.appendIntOrLL("i3", 1 * billion);
+ b.appendIntOrLL("i4", -1 * billion);
+
+ b.appendIntOrLL("L1", 2 * billion);
+ b.appendIntOrLL("L2", -2 * billion);
+ b.appendIntOrLL("L3", 4 * billion);
+ b.appendIntOrLL("L4", -4 * billion);
+ b.appendIntOrLL("L5", 16 * billion);
+ b.appendIntOrLL("L6", -16 * billion);
+
+ BSONObj o = b.obj();
+ keyTest(o);
+
+ ASSERT(o["i1"].type() == NumberInt);
+ ASSERT(o["i1"].number() == 1);
+ ASSERT(o["i2"].type() == NumberInt);
+ ASSERT(o["i2"].number() == -1);
+ ASSERT(o["i3"].type() == NumberInt);
+ ASSERT(o["i3"].number() == 1 * billion);
+ ASSERT(o["i4"].type() == NumberInt);
+ ASSERT(o["i4"].number() == -1 * billion);
+
+ ASSERT(o["L1"].isNumber());
+ ASSERT(o["L1"].number() == 2 * billion);
+ ASSERT(o["L2"].isNumber());
+ ASSERT(o["L2"].number() == -2 * billion);
+ ASSERT(o["L3"].type() == NumberLong);
+ ASSERT(o["L3"].number() == 4 * billion);
+ ASSERT(o["L4"].type() == NumberLong);
+ ASSERT(o["L4"].number() == -4 * billion);
+ ASSERT(o["L5"].type() == NumberLong);
+ ASSERT(o["L5"].number() == 16 * billion);
+ ASSERT(o["L6"].type() == NumberLong);
+ ASSERT(o["L6"].number() == -16 * billion);
+ }
+};
- {
- BSONObjBuilder b;
- b.appendIntOrLL("L4", -4*billion);
- keyTest(b.obj());
- keyTest( BSON("" << billion) );
- }
+struct AppendNumber {
+ void run() {
+ BSONObjBuilder b;
+ b.appendNumber("a", 5);
+ b.appendNumber("b", 5.5);
+ b.appendNumber("c", (1024LL * 1024 * 1024) - 1);
+ b.appendNumber("d", (1024LL * 1024 * 1024 * 1024) - 1);
+ b.appendNumber("e", 1024LL * 1024 * 1024 * 1024 * 1024 * 1024);
+
+ BSONObj o = b.obj();
+ keyTest(o);
+
+ ASSERT(o["a"].type() == NumberInt);
+ ASSERT(o["b"].type() == NumberDouble);
+ ASSERT(o["c"].type() == NumberInt);
+ ASSERT(o["d"].type() == NumberDouble);
+ ASSERT(o["e"].type() == NumberLong);
+ }
+};
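+// The thresholds exercised above reflect appendNumber()'s promotion rule for
+// long long values (a sketch of the behavior): below 2^30 the value is stored
+// as NumberInt, below 2^40 as NumberDouble, and anything larger as NumberLong.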
+
+class ToStringArray {
+public:
+ void run() {
+ string spec = "{ a: [ \"a\", \"b\" ] }";
+ ASSERT_EQUALS(spec, fromjson(spec).toString());
+
+ BSONObj x = BSON("a"
+ << "astring"
+ << "b"
+ << "str");
+ keyTest(x);
+ keyTest(x);
+ BSONObj y = BSON("a"
+ << "astring"
+ << "b"
+ << "stra");
+ keyTest(y);
+ y = BSON("a"
+ << "");
+ keyTest(y);
+
+ keyTest(BSON("abc" << true));
+ keyTest(BSON("abc" << false));
+ keyTest(BSON("abc" << false << "b" << true));
+
+ Date_t now = jsTime();
+ keyTest(BSON("" << now << "" << 3 << "" << jstNULL << "" << true));
+ keyTest(BSON("" << now << "" << 3 << "" << BSONObj() << "" << true));
- BSONObjBuilder b;
- b.appendIntOrLL("i1", 1);
- b.appendIntOrLL("i2", -1);
- b.appendIntOrLL("i3", 1*billion);
- b.appendIntOrLL("i4", -1*billion);
-
- b.appendIntOrLL("L1", 2*billion);
- b.appendIntOrLL("L2", -2*billion);
- b.appendIntOrLL("L3", 4*billion);
- b.appendIntOrLL("L4", -4*billion);
- b.appendIntOrLL("L5", 16*billion);
- b.appendIntOrLL("L6", -16*billion);
-
- BSONObj o = b.obj();
- keyTest(o);
-
- ASSERT(o["i1"].type() == NumberInt);
- ASSERT(o["i1"].number() == 1);
- ASSERT(o["i2"].type() == NumberInt);
- ASSERT(o["i2"].number() == -1);
- ASSERT(o["i3"].type() == NumberInt);
- ASSERT(o["i3"].number() == 1*billion);
- ASSERT(o["i4"].type() == NumberInt);
- ASSERT(o["i4"].number() == -1*billion);
-
- ASSERT(o["L1"].isNumber());
- ASSERT(o["L1"].number() == 2*billion);
- ASSERT(o["L2"].isNumber());
- ASSERT(o["L2"].number() == -2*billion);
- ASSERT(o["L3"].type() == NumberLong);
- ASSERT(o["L3"].number() == 4*billion);
- ASSERT(o["L4"].type() == NumberLong);
- ASSERT(o["L4"].number() == -4*billion);
- ASSERT(o["L5"].type() == NumberLong);
- ASSERT(o["L5"].number() == 16*billion);
- ASSERT(o["L6"].type() == NumberLong);
- ASSERT(o["L6"].number() == -16*billion);
+ {
+ {
+ // check signed dates with new key format
+ KeyV1Owned a(BSONObjBuilder().appendDate("", -50).obj());
+ KeyV1Owned b(BSONObjBuilder().appendDate("", 50).obj());
+ ASSERT(a.woCompare(b, Ordering::make(BSONObj())) < 0);
}
- };
-
- struct AppendNumber {
- void run() {
- BSONObjBuilder b;
- b.appendNumber( "a" , 5 );
- b.appendNumber( "b" , 5.5 );
- b.appendNumber( "c" , (1024LL*1024*1024)-1 );
- b.appendNumber( "d" , (1024LL*1024*1024*1024)-1 );
- b.appendNumber( "e" , 1024LL*1024*1024*1024*1024*1024 );
-
- BSONObj o = b.obj();
- keyTest(o);
-
- ASSERT( o["a"].type() == NumberInt );
- ASSERT( o["b"].type() == NumberDouble );
- ASSERT( o["c"].type() == NumberInt );
- ASSERT( o["d"].type() == NumberDouble );
- ASSERT( o["e"].type() == NumberLong );
-
+ {
+ // backward compatibility
+ KeyBson a(BSONObjBuilder().appendDate("", -50).obj());
+ KeyBson b(BSONObjBuilder().appendDate("", 50).obj());
+ ASSERT(a.woCompare(b, Ordering::make(BSONObj())) > 0);
}
- };
-
- class ToStringArray {
- public:
- void run() {
- string spec = "{ a: [ \"a\", \"b\" ] }";
- ASSERT_EQUALS( spec, fromjson( spec ).toString() );
-
- BSONObj x = BSON( "a" << "astring" << "b" << "str" );
- keyTest(x);
- keyTest(x);
- BSONObj y = BSON( "a" << "astring" << "b" << "stra" );
- keyTest(y);
- y = BSON( "a" << "" );
- keyTest(y);
-
- keyTest( BSON("abc" << true ) );
- keyTest( BSON("abc" << false ) );
- keyTest( BSON("abc" << false << "b" << true ) );
-
- Date_t now = jsTime();
- keyTest( BSON("" << now << "" << 3 << "" << jstNULL << "" << true) );
- keyTest( BSON("" << now << "" << 3 << "" << BSONObj() << "" << true) );
-
- {
- {
- // check signed dates with new key format
- KeyV1Owned a( BSONObjBuilder().appendDate("", -50).obj() );
- KeyV1Owned b( BSONObjBuilder().appendDate("", 50).obj() );
- ASSERT( a.woCompare(b, Ordering::make(BSONObj())) < 0 );
- }
- {
- // backward compatibility
- KeyBson a( BSONObjBuilder().appendDate("", -50).obj() );
- KeyBson b( BSONObjBuilder().appendDate("", 50).obj() );
- ASSERT( a.woCompare(b, Ordering::make(BSONObj())) > 0 );
- }
- {
- // this is an uncompactable key:
- BSONObj uc1 = BSONObjBuilder().appendDate("", -50).appendCode("", "abc").obj();
- BSONObj uc2 = BSONObjBuilder().appendDate("", 55).appendCode("", "abc").obj();
- ASSERT( uc1.woCompare(uc2, Ordering::make(BSONObj())) < 0 );
- {
- KeyV1Owned a(uc1);
- KeyV1Owned b(uc2);
- ASSERT( !a.isCompactFormat() );
- ASSERT( a.woCompare(b, Ordering::make(BSONObj())) < 0 );
- }
- {
- KeyBson a(uc1);
- KeyBson b(uc2);
- ASSERT( !a.isCompactFormat() );
- ASSERT( a.woCompare(b, Ordering::make(BSONObj())) > 0 );
- }
- }
- }
-
- {
- BSONObjBuilder b;
- b.appendBinData("f", 8, (BinDataType) 1, "aaaabbbb");
- b.appendBinData("e", 3, (BinDataType) 1, "aaa");
- b.appendBinData("b", 1, (BinDataType) 1, "x");
- BSONObj o = b.obj();
- keyTest( o, true );
- }
-
+ {
+ // this is an uncompactable key:
+ BSONObj uc1 = BSONObjBuilder().appendDate("", -50).appendCode("", "abc").obj();
+ BSONObj uc2 = BSONObjBuilder().appendDate("", 55).appendCode("", "abc").obj();
+ ASSERT(uc1.woCompare(uc2, Ordering::make(BSONObj())) < 0);
{
- // check (non)equality
- BSONObj a = BSONObjBuilder().appendBinData("", 8, (BinDataType) 1, "abcdefgh").obj();
- BSONObj b = BSONObjBuilder().appendBinData("", 8, (BinDataType) 1, "abcdefgj").obj();
- ASSERT( !a.equal(b) );
- int res_ab = a.woCompare(b);
- ASSERT( res_ab != 0 );
- keyTest( a, true );
- keyTest( b, true );
-
- // check subtypes do not equal
- BSONObj c = BSONObjBuilder().appendBinData("", 8, (BinDataType) 4, "abcdefgh").obj();
- BSONObj d = BSONObjBuilder().appendBinData("", 8, (BinDataType) 0x81, "abcdefgh").obj();
- ASSERT( !a.equal(c) );
- int res_ac = a.woCompare(c);
- ASSERT( res_ac != 0 );
- keyTest( c, true );
- ASSERT( !a.equal(d) );
- int res_ad = a.woCompare(d);
- ASSERT( res_ad != 0 );
- keyTest( d, true );
-
- KeyV1Owned A(a);
- KeyV1Owned B(b);
- KeyV1Owned C(c);
- KeyV1Owned D(d);
- ASSERT( !A.woEqual(B) );
- ASSERT( A.woCompare(B, Ordering::make(BSONObj())) < 0 && res_ab < 0 );
- ASSERT( !A.woEqual(C) );
- ASSERT( A.woCompare(C, Ordering::make(BSONObj())) < 0 && res_ac < 0 );
- ASSERT( !A.woEqual(D) );
- ASSERT( A.woCompare(D, Ordering::make(BSONObj())) < 0 && res_ad < 0 );
+ KeyV1Owned a(uc1);
+ KeyV1Owned b(uc2);
+ ASSERT(!a.isCompactFormat());
+ ASSERT(a.woCompare(b, Ordering::make(BSONObj())) < 0);
}
-
{
- BSONObjBuilder b;
- b.appendBinData("f", 33, (BinDataType) 1, "123456789012345678901234567890123");
- BSONObj o = b.obj();
- keyTest( o, false );
+ KeyBson a(uc1);
+ KeyBson b(uc2);
+ ASSERT(!a.isCompactFormat());
+ ASSERT(a.woCompare(b, Ordering::make(BSONObj())) > 0);
}
+ }
+ }
- {
- for( int i = 1; i <= 3; i++ ) {
- for( int j = 1; j <= 3; j++ ) {
- BSONObjBuilder b;
- b.appendBinData("f", i, (BinDataType) j, "abc");
- BSONObj o = b.obj();
- keyTest( o, j != ByteArrayDeprecated );
- }
- }
- }
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 8, (BinDataType)1, "aaaabbbb");
+ b.appendBinData("e", 3, (BinDataType)1, "aaa");
+ b.appendBinData("b", 1, (BinDataType)1, "x");
+ BSONObj o = b.obj();
+ keyTest(o, true);
+ }
- {
- BSONObjBuilder b;
- b.appendBinData("f", 1, (BinDataType) 133, "a");
- BSONObj o = b.obj();
- keyTest( o, true );
- }
+ {
+ // check (non)equality
+ BSONObj a = BSONObjBuilder().appendBinData("", 8, (BinDataType)1, "abcdefgh").obj();
+ BSONObj b = BSONObjBuilder().appendBinData("", 8, (BinDataType)1, "abcdefgj").obj();
+ ASSERT(!a.equal(b));
+ int res_ab = a.woCompare(b);
+ ASSERT(res_ab != 0);
+ keyTest(a, true);
+ keyTest(b, true);
+
+            // check that differing subtypes do not compare equal
+ BSONObj c = BSONObjBuilder().appendBinData("", 8, (BinDataType)4, "abcdefgh").obj();
+ BSONObj d = BSONObjBuilder().appendBinData("", 8, (BinDataType)0x81, "abcdefgh").obj();
+ ASSERT(!a.equal(c));
+ int res_ac = a.woCompare(c);
+ ASSERT(res_ac != 0);
+ keyTest(c, true);
+ ASSERT(!a.equal(d));
+ int res_ad = a.woCompare(d);
+ ASSERT(res_ad != 0);
+ keyTest(d, true);
+
+ KeyV1Owned A(a);
+ KeyV1Owned B(b);
+ KeyV1Owned C(c);
+ KeyV1Owned D(d);
+ ASSERT(!A.woEqual(B));
+ ASSERT(A.woCompare(B, Ordering::make(BSONObj())) < 0 && res_ab < 0);
+ ASSERT(!A.woEqual(C));
+ ASSERT(A.woCompare(C, Ordering::make(BSONObj())) < 0 && res_ac < 0);
+ ASSERT(!A.woEqual(D));
+ ASSERT(A.woCompare(D, Ordering::make(BSONObj())) < 0 && res_ad < 0);
+ }
- {
- BSONObjBuilder b;
- b.append("AA", 3);
- b.appendBinData("f", 0, (BinDataType) 0, "");
- b.appendBinData("e", 3, (BinDataType) 7, "aaa");
- b.appendBinData("b", 1, (BinDataType) 128, "x");
- b.append("z", 3);
- b.appendBinData("bb", 0, (BinDataType) 129, "x");
- BSONObj o = b.obj();
- keyTest( o, true );
- }
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 33, (BinDataType)1, "123456789012345678901234567890123");
+ BSONObj o = b.obj();
+ keyTest(o, false);
+ }
- {
- // 9 is not supported in compact format. so test a non-compact case here.
+ {
+ for (int i = 1; i <= 3; i++) {
+ for (int j = 1; j <= 3; j++) {
BSONObjBuilder b;
- b.appendBinData("f", 9, (BinDataType) 0, "aaaabbbbc");
+ b.appendBinData("f", i, (BinDataType)j, "abc");
BSONObj o = b.obj();
- keyTest( o );
+ keyTest(o, j != ByteArrayDeprecated);
}
- }
- };
-
- class ToStringNumber {
- public:
+ }
+ }
- void run() {
- BSONObjBuilder b;
- b.append( "a" , (int)4 );
- b.append( "b" , (double)5 );
- b.append( "c" , (long long)6 );
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 1, (BinDataType)133, "a");
+ BSONObj o = b.obj();
+ keyTest(o, true);
+ }
- b.append( "d" , 123.456789123456789123456789123456789 );
- b.append( "e" , 123456789.123456789123456789123456789 );
- b.append( "f" , 1234567891234567891234.56789123456789 );
+ {
+ BSONObjBuilder b;
+ b.append("AA", 3);
+ b.appendBinData("f", 0, (BinDataType)0, "");
+ b.appendBinData("e", 3, (BinDataType)7, "aaa");
+ b.appendBinData("b", 1, (BinDataType)128, "x");
+ b.append("z", 3);
+ b.appendBinData("bb", 0, (BinDataType)129, "x");
+ BSONObj o = b.obj();
+ keyTest(o, true);
+ }
- b.append( "g" , -123.456 );
+ {
+        // Length 9 is not supported in the compact format, so test a non-compact case here.
+ BSONObjBuilder b;
+ b.appendBinData("f", 9, (BinDataType)0, "aaaabbbbc");
+ BSONObj o = b.obj();
+ keyTest(o);
+ }
+ }
+};
- b.append( "h" , 0.0 );
- b.append( "i" , -0.0 );
+class ToStringNumber {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", (int)4);
+ b.append("b", (double)5);
+ b.append("c", (long long)6);
- BSONObj x = b.obj();
- keyTest(x);
+ b.append("d", 123.456789123456789123456789123456789);
+ b.append("e", 123456789.123456789123456789123456789);
+ b.append("f", 1234567891234567891234.56789123456789);
- ASSERT_EQUALS( "4", x["a"].toString( false , true ) );
- ASSERT_EQUALS( "5.0", x["b"].toString( false , true ) );
- ASSERT_EQUALS( "6", x["c"].toString( false , true ) );
+ b.append("g", -123.456);
- ASSERT_EQUALS( "123.4567891234568" , x["d"].toString( false , true ) );
- ASSERT_EQUALS( "123456789.1234568" , x["e"].toString( false , true ) );
- // ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) ); // windows and *nix are different - TODO, work around for test or not bother?
+ b.append("h", 0.0);
+ b.append("i", -0.0);
- ASSERT_EQUALS( "-123.456" , x["g"].toString( false , true ) );
+ BSONObj x = b.obj();
+ keyTest(x);
- ASSERT_EQUALS( "0.0" , x["h"].toString( false , true ) );
- ASSERT_EQUALS( "-0.0" , x["i"].toString( false , true ) );
- }
- };
+ ASSERT_EQUALS("4", x["a"].toString(false, true));
+ ASSERT_EQUALS("5.0", x["b"].toString(false, true));
+ ASSERT_EQUALS("6", x["c"].toString(false, true));
- class NullString {
- public:
- void run() {
- {
- BSONObjBuilder b;
- const char x[] = {'a', 0, 'b', 0};
- b.append("field", x, 4);
- b.append("z", true);
- BSONObj B = b.obj();
- //cout << B.toString() << endl;
-
- BSONObjBuilder a;
- const char xx[] = {'a', 0, 'c', 0};
- a.append("field", xx, 4);
- a.append("z", true);
- BSONObj A = a.obj();
-
- BSONObjBuilder c;
- const char xxx[] = {'a', 0, 'c', 0, 0};
- c.append("field", xxx, 5);
- c.append("z", true);
- BSONObj C = c.obj();
-
- // test that nulls are ok within bson strings
- ASSERT( !(A == B) );
- ASSERT( A > B );
-
- ASSERT( !(B == C) );
- ASSERT( C > B );
-
- // check iteration is ok
- ASSERT( B["z"].Bool() && A["z"].Bool() && C["z"].Bool() );
- }
+ ASSERT_EQUALS("123.4567891234568", x["d"].toString(false, true));
+ ASSERT_EQUALS("123456789.1234568", x["e"].toString(false, true));
+ // ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) ); // windows and *nix are different - TODO, work around for test or not bother?
- BSONObjBuilder b;
- b.append("a", "a\0b", 4);
- string z("a\0b", 3);
- b.append("b", z);
- b.appendAs(b.asTempObj()["a"], "c");
- BSONObj o = b.obj();
- keyTest(o);
+ ASSERT_EQUALS("-123.456", x["g"].toString(false, true));
- stringstream ss;
- ss << 'a' << '\0' << 'b';
+ ASSERT_EQUALS("0.0", x["h"].toString(false, true));
+ ASSERT_EQUALS("-0.0", x["i"].toString(false, true));
+ }
+};
- ASSERT_EQUALS(o["a"].valuestrsize(), 3+1);
- ASSERT_EQUALS(o["a"].str(), ss.str());
+class NullString {
+public:
+ void run() {
+ {
+ BSONObjBuilder b;
+ const char x[] = {'a', 0, 'b', 0};
+ b.append("field", x, 4);
+ b.append("z", true);
+ BSONObj B = b.obj();
+ // cout << B.toString() << endl;
- ASSERT_EQUALS(o["b"].valuestrsize(), 3+1);
- ASSERT_EQUALS(o["b"].str(), ss.str());
+ BSONObjBuilder a;
+ const char xx[] = {'a', 0, 'c', 0};
+ a.append("field", xx, 4);
+ a.append("z", true);
+ BSONObj A = a.obj();
+
+ BSONObjBuilder c;
+ const char xxx[] = {'a', 0, 'c', 0, 0};
+ c.append("field", xxx, 5);
+ c.append("z", true);
+ BSONObj C = c.obj();
+
+ // test that nulls are ok within bson strings
+ ASSERT(!(A == B));
+ ASSERT(A > B);
+
+ ASSERT(!(B == C));
+ ASSERT(C > B);
+
+ // check iteration is ok
+ ASSERT(B["z"].Bool() && A["z"].Bool() && C["z"].Bool());
+ }
- ASSERT_EQUALS(o["c"].valuestrsize(), 3+1);
- ASSERT_EQUALS(o["c"].str(), ss.str());
+ BSONObjBuilder b;
+ b.append("a", "a\0b", 4);
+ string z("a\0b", 3);
+ b.append("b", z);
+ b.appendAs(b.asTempObj()["a"], "c");
+ BSONObj o = b.obj();
+ keyTest(o);
- }
+ stringstream ss;
+ ss << 'a' << '\0' << 'b';
- };
+ ASSERT_EQUALS(o["a"].valuestrsize(), 3 + 1);
+ ASSERT_EQUALS(o["a"].str(), ss.str());
- class AppendAs {
- public:
- void run() {
- BSONObjBuilder b;
- {
- BSONObj foo = BSON( "foo" << 1 );
- b.appendAs( foo.firstElement(), "bar" );
- }
- ASSERT_EQUALS( BSON( "bar" << 1 ), b.done() );
- }
- };
-
- class GetField {
- public:
- void run(){
- BSONObj o = BSON( "a" << 1 <<
- "b" << BSON( "a" << 2 ) <<
- "c" << BSON_ARRAY( BSON( "a" << 3 ) << BSON( "a" << 4 ) ) );
- ASSERT_EQUALS( 1 , o.getFieldDotted( "a" ).numberInt() );
- ASSERT_EQUALS( 2 , o.getFieldDotted( "b.a" ).numberInt() );
- ASSERT_EQUALS( 3 , o.getFieldDotted( "c.0.a" ).numberInt() );
- ASSERT_EQUALS( 4 , o.getFieldDotted( "c.1.a" ).numberInt() );
- ASSERT( o.getFieldDotted( "x" ).eoo() );
- ASSERT( o.getFieldDotted( "a.x" ).eoo() );
- ASSERT( o.getFieldDotted( "x.y" ).eoo() );
- ASSERT( o.getFieldDotted( "" ).eoo() );
- ASSERT( o.getFieldDotted( "." ).eoo() );
- ASSERT( o.getFieldDotted( ".." ).eoo() );
- ASSERT( o.getFieldDotted( "..." ).eoo() );
- ASSERT( o.getFieldDotted( "a." ).eoo() );
- ASSERT( o.getFieldDotted( ".a" ).eoo() );
- ASSERT( o.getFieldDotted( "b.a." ).eoo() );
- keyTest(o);
- }
- };
-
- class ToStringRecursionDepth {
- public:
- // create a nested BSON object with the specified recursion depth
- BSONObj recursiveBSON( int depth ) {
- BSONObjBuilder b;
- if ( depth==0 ) {
- b << "name" << "Joe";
- return b.obj();
- }
- b.append( "test", recursiveBSON( depth - 1) );
- return b.obj();
- }
+ ASSERT_EQUALS(o["b"].valuestrsize(), 3 + 1);
+ ASSERT_EQUALS(o["b"].str(), ss.str());
- void run() {
- BSONObj nestedBSON;
- StringBuilder s;
- string nestedBSONString;
- size_t found;
-
- // recursion depth one less than max allowed-- do not shorten the string
- nestedBSON = recursiveBSON( BSONObj::maxToStringRecursionDepth - 1 );
- nestedBSON.toString( s, true, false );
- nestedBSONString = s.str();
- found = nestedBSONString.find( "..." );
- // did not find the "..." pattern
- ASSERT_EQUALS( found!=string::npos, false );
-
- // recursion depth is equal to max allowed -- do not shorten the string
- nestedBSON = recursiveBSON( BSONObj::maxToStringRecursionDepth );
- nestedBSON.toString( s, true, false );
- nestedBSONString = s.str();
- found = nestedBSONString.find( "..." );
- // did not find the "..." pattern
- ASSERT_EQUALS( found!=string::npos, false );
-
- // recursion depth - one greater than max allowed -- shorten the string
- nestedBSON = recursiveBSON( BSONObj::maxToStringRecursionDepth + 1 );
- nestedBSON.toString( s, false, false );
- nestedBSONString = s.str();
- found = nestedBSONString.find( "..." );
- // found the "..." pattern
- ASSERT_EQUALS( found!=string::npos, true );
-
- /* recursion depth - one greater than max allowed but with full=true
- * should fail with an assertion
- */
- nestedBSON = recursiveBSON( BSONObj::maxToStringRecursionDepth + 1 );
- ASSERT_THROWS( nestedBSON.toString( s, false, true ) , UserException );
- }
- };
-
- class StringWithNull {
- public:
- void run() {
- const string input = string("a") + '\0' + 'b';
- ASSERT_EQUALS(input.size(), 3U);
-
- BSONObj obj = BSON("str" << input);
- const string output = obj.firstElement().String();
- ASSERT_EQUALS(escape(output), escape(input)); // for better failure output
- ASSERT_EQUALS(output, input);
- }
- };
+ ASSERT_EQUALS(o["c"].valuestrsize(), 3 + 1);
+ ASSERT_EQUALS(o["c"].str(), ss.str());
+ }
+};
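
For reference, a minimal sketch of the embedded-NUL behavior NullString exercises, assuming the legacy mongo jsobj.h headers and the suite's ASSERT macros (the sketch's function name is illustrative only):

    void embeddedNulSketch() {
        BSONObjBuilder b;
        const char payload[] = {'a', 0, 'b', 0};
        b.append("field", payload, 4);  // explicit length preserves the interior NUL
        BSONObj o = b.obj();
        // valuestrsize() includes the trailing terminator, so it reports 4 here,
        // while str() yields a std::string of size 3 holding 'a', '\0', 'b'.
        ASSERT_EQUALS(4, o["field"].valuestrsize());
        ASSERT_EQUALS(3U, o["field"].str().size());
    }
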
- namespace Validation {
+class AppendAs {
+public:
+ void run() {
+ BSONObjBuilder b;
+ {
+ BSONObj foo = BSON("foo" << 1);
+ b.appendAs(foo.firstElement(), "bar");
+ }
+ ASSERT_EQUALS(BSON("bar" << 1), b.done());
+ }
+};
+
+class GetField {
+public:
+ void run() {
+ BSONObj o = BSON("a" << 1 << "b" << BSON("a" << 2) << "c"
+ << BSON_ARRAY(BSON("a" << 3) << BSON("a" << 4)));
+ ASSERT_EQUALS(1, o.getFieldDotted("a").numberInt());
+ ASSERT_EQUALS(2, o.getFieldDotted("b.a").numberInt());
+ ASSERT_EQUALS(3, o.getFieldDotted("c.0.a").numberInt());
+ ASSERT_EQUALS(4, o.getFieldDotted("c.1.a").numberInt());
+ ASSERT(o.getFieldDotted("x").eoo());
+ ASSERT(o.getFieldDotted("a.x").eoo());
+ ASSERT(o.getFieldDotted("x.y").eoo());
+ ASSERT(o.getFieldDotted("").eoo());
+ ASSERT(o.getFieldDotted(".").eoo());
+ ASSERT(o.getFieldDotted("..").eoo());
+ ASSERT(o.getFieldDotted("...").eoo());
+ ASSERT(o.getFieldDotted("a.").eoo());
+ ASSERT(o.getFieldDotted(".a").eoo());
+ ASSERT(o.getFieldDotted("b.a.").eoo());
+ keyTest(o);
+ }
+};
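
A minimal sketch of the dotted-path lookups GetField covers, under the same header assumptions; array elements are addressed by decimal index, and missing paths come back as EOO rather than throwing:

    void dottedLookupSketch() {
        BSONObj o = BSON("a" << BSON("b" << 5) << "c" << BSON_ARRAY(10 << 20));
        ASSERT_EQUALS(5, o.getFieldDotted("a.b").numberInt());   // subobject field
        ASSERT_EQUALS(20, o.getFieldDotted("c.1").numberInt());  // array index
        ASSERT(o.getFieldDotted("a.missing").eoo());             // absent path
    }
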
- class Base {
- public:
- virtual ~Base() {}
- void run() {
- ASSERT( valid().valid() );
- ASSERT( !invalid().valid() );
- }
- protected:
- virtual BSONObj valid() const { return BSONObj(); }
- virtual BSONObj invalid() const { return BSONObj(); }
- static char get( const BSONObj &o, int i ) {
- return o.objdata()[ i ];
- }
- static void set( BSONObj &o, int i, char c ) {
- const_cast< char * >( o.objdata() )[ i ] = c;
- }
- };
+class ToStringRecursionDepth {
+public:
+ // create a nested BSON object with the specified recursion depth
+ BSONObj recursiveBSON(int depth) {
+ BSONObjBuilder b;
+ if (depth == 0) {
+ b << "name"
+ << "Joe";
+ return b.obj();
+ }
+ b.append("test", recursiveBSON(depth - 1));
+ return b.obj();
+ }
- class BadType : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":1}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- set( ret, 4, 50 );
- return ret;
- }
- };
+ void run() {
+ BSONObj nestedBSON;
+ StringBuilder s;
+ string nestedBSONString;
+ size_t found;
+
+        // recursion depth one less than max allowed -- do not shorten the string
+ nestedBSON = recursiveBSON(BSONObj::maxToStringRecursionDepth - 1);
+ nestedBSON.toString(s, true, false);
+ nestedBSONString = s.str();
+ found = nestedBSONString.find("...");
+ // did not find the "..." pattern
+ ASSERT_EQUALS(found != string::npos, false);
+
+ // recursion depth is equal to max allowed -- do not shorten the string
+ nestedBSON = recursiveBSON(BSONObj::maxToStringRecursionDepth);
+ nestedBSON.toString(s, true, false);
+ nestedBSONString = s.str();
+ found = nestedBSONString.find("...");
+ // did not find the "..." pattern
+ ASSERT_EQUALS(found != string::npos, false);
+
+        // recursion depth one greater than max allowed -- shorten the string
+ nestedBSON = recursiveBSON(BSONObj::maxToStringRecursionDepth + 1);
+ nestedBSON.toString(s, false, false);
+ nestedBSONString = s.str();
+ found = nestedBSONString.find("...");
+ // found the "..." pattern
+ ASSERT_EQUALS(found != string::npos, true);
+
+        /* recursion depth one greater than max allowed, but with full=true:
+         * should fail with an assertion
+         */
+ nestedBSON = recursiveBSON(BSONObj::maxToStringRecursionDepth + 1);
+ ASSERT_THROWS(nestedBSON.toString(s, false, true), UserException);
+ }
+};
+
+class StringWithNull {
+public:
+ void run() {
+ const string input = string("a") + '\0' + 'b';
+ ASSERT_EQUALS(input.size(), 3U);
+
+ BSONObj obj = BSON("str" << input);
+ const string output = obj.firstElement().String();
+ ASSERT_EQUALS(escape(output), escape(input)); // for better failure output
+ ASSERT_EQUALS(output, input);
+ }
+};
- class EooBeforeEnd : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":1}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- // (first byte of size)++
- set( ret, 0, get( ret, 0 ) + 1 );
- // re-read size for BSONObj::details
- return ret.copy();
- }
- };
+namespace Validation {
- class Undefined : public Base {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendNull( "a" );
- BSONObj o = b.done();
- set( o, 4, mongo::Undefined );
- ASSERT( o.valid() );
- }
- };
+class Base {
+public:
+ virtual ~Base() {}
+ void run() {
+ ASSERT(valid().valid());
+ ASSERT(!invalid().valid());
+ }
- class TotalSizeTooSmall : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":1}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- // (first byte of size)--
- set( ret, 0, get( ret, 0 ) - 1 );
- // re-read size for BSONObj::details
- return ret.copy();
- }
- };
+protected:
+ virtual BSONObj valid() const {
+ return BSONObj();
+ }
+ virtual BSONObj invalid() const {
+ return BSONObj();
+ }
+ static char get(const BSONObj& o, int i) {
+ return o.objdata()[i];
+ }
+ static void set(BSONObj& o, int i, char c) {
+ const_cast<char*>(o.objdata())[i] = c;
+ }
+};
- class EooMissing : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":1}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- set( ret, ret.objsize() - 1, (char) 0xff );
- // (first byte of size)--
- set( ret, 0, get( ret, 0 ) - 1 );
- // re-read size for BSONObj::details
- return ret.copy();
- }
- };
+class BadType : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":1}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set(ret, 4, 50);
+ return ret;
+ }
+};
- class WrongStringSize : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":\"b\"}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- ASSERT_EQUALS( ret.firstElement().valuestr()[0] , 'b' );
- ASSERT_EQUALS( ret.firstElement().valuestr()[1] , 0 );
- ((char*)ret.firstElement().valuestr())[1] = 1;
- return ret.copy();
- }
- };
+class EooBeforeEnd : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":1}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ // (first byte of size)++
+ set(ret, 0, get(ret, 0) + 1);
+ // re-read size for BSONObj::details
+ return ret.copy();
+ }
+};
- class ZeroStringSize : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":\"b\"}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- set( ret, 7, 0 );
- return ret;
- }
- };
+class Undefined : public Base {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendNull("a");
+ BSONObj o = b.done();
+ set(o, 4, mongo::Undefined);
+ ASSERT(o.valid());
+ }
+};
- class NegativeStringSize : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":\"b\"}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- set( ret, 10, -100 );
- return ret;
- }
- };
+class TotalSizeTooSmall : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":1}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ // (first byte of size)--
+ set(ret, 0, get(ret, 0) - 1);
+ // re-read size for BSONObj::details
+ return ret.copy();
+ }
+};
- class WrongSubobjectSize : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":{\"b\":1}}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- set( ret, 0, get( ret, 0 ) + 1 );
- set( ret, 7, get( ret, 7 ) + 1 );
- return ret.copy();
- }
- };
+class EooMissing : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":1}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set(ret, ret.objsize() - 1, (char)0xff);
+ // (first byte of size)--
+ set(ret, 0, get(ret, 0) - 1);
+ // re-read size for BSONObj::details
+ return ret.copy();
+ }
+};
- class WrongDbrefNsSize : public Base {
- BSONObj valid() const {
- return fromjson( "{ \"a\": Dbref( \"b\", \"ffffffffffffffffffffffff\" ) }" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- set( ret, 0, get( ret, 0 ) + 1 );
- set( ret, 7, get( ret, 7 ) + 1 );
- return ret.copy();
- };
- };
-
- class NoFieldNameEnd : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":1}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- memset( const_cast< char * >( ret.objdata() ) + 5, 0xff, ret.objsize() - 5 );
- return ret;
- }
- };
+class WrongStringSize : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":\"b\"}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ ASSERT_EQUALS(ret.firstElement().valuestr()[0], 'b');
+ ASSERT_EQUALS(ret.firstElement().valuestr()[1], 0);
+ ((char*)ret.firstElement().valuestr())[1] = 1;
+ return ret.copy();
+ }
+};
- class BadRegex : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":/c/i}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- memset( const_cast< char * >( ret.objdata() ) + 7, 0xff, ret.objsize() - 7 );
- return ret;
- }
- };
+class ZeroStringSize : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":\"b\"}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set(ret, 7, 0);
+ return ret;
+ }
+};
- class BadRegexOptions : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":/c/i}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- memset( const_cast< char * >( ret.objdata() ) + 9, 0xff, ret.objsize() - 9 );
- return ret;
- }
- };
+class NegativeStringSize : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":\"b\"}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set(ret, 10, -100);
+ return ret;
+ }
+};
- class CodeWScopeBase : public Base {
- BSONObj valid() const {
- BSONObjBuilder b;
- BSONObjBuilder scope;
- scope.append( "a", "b" );
- b.appendCodeWScope( "c", "d", scope.done() );
- return b.obj();
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- modify( ret );
- return ret;
- }
- protected:
- virtual void modify( BSONObj &o ) const = 0;
- };
+class WrongSubobjectSize : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":{\"b\":1}}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set(ret, 0, get(ret, 0) + 1);
+ set(ret, 7, get(ret, 7) + 1);
+ return ret.copy();
+ }
+};
- class CodeWScopeSmallSize : public CodeWScopeBase {
- void modify( BSONObj &o ) const {
- set( o, 7, 7 );
- }
- };
+class WrongDbrefNsSize : public Base {
+ BSONObj valid() const {
+ return fromjson("{ \"a\": Dbref( \"b\", \"ffffffffffffffffffffffff\" ) }");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set(ret, 0, get(ret, 0) + 1);
+ set(ret, 7, get(ret, 7) + 1);
+ return ret.copy();
+ };
+};
- class CodeWScopeZeroStrSize : public CodeWScopeBase {
- void modify( BSONObj &o ) const {
- set( o, 11, 0 );
- }
- };
+class NoFieldNameEnd : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":1}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ memset(const_cast<char*>(ret.objdata()) + 5, 0xff, ret.objsize() - 5);
+ return ret;
+ }
+};
- class CodeWScopeSmallStrSize : public CodeWScopeBase {
- void modify( BSONObj &o ) const {
- set( o, 11, 1 );
- }
- };
+class BadRegex : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":/c/i}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ memset(const_cast<char*>(ret.objdata()) + 7, 0xff, ret.objsize() - 7);
+ return ret;
+ }
+};
- class CodeWScopeNoSizeForObj : public CodeWScopeBase {
- void modify( BSONObj &o ) const {
- set( o, 7, 13 );
- }
- };
+class BadRegexOptions : public Base {
+ BSONObj valid() const {
+ return fromjson("{\"a\":/c/i}");
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ memset(const_cast<char*>(ret.objdata()) + 9, 0xff, ret.objsize() - 9);
+ return ret;
+ }
+};
- class CodeWScopeSmallObjSize : public CodeWScopeBase {
- void modify( BSONObj &o ) const {
- set( o, 17, 1 );
- }
- };
+class CodeWScopeBase : public Base {
+ BSONObj valid() const {
+ BSONObjBuilder b;
+ BSONObjBuilder scope;
+ scope.append("a", "b");
+ b.appendCodeWScope("c", "d", scope.done());
+ return b.obj();
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ modify(ret);
+ return ret;
+ }
- class CodeWScopeBadObject : public CodeWScopeBase {
- void modify( BSONObj &o ) const {
- set( o, 21, JSTypeMax + 1 );
- }
- };
-
- class NoSize {
- public:
- NoSize( BSONType type ) : type_( type ) {}
- void run() {
- const char data[] = { 0x07, 0x00, 0x00, 0x00, char( type_ ), 'a', 0x00 };
- BSONObj o( data );
- ASSERT( !o.valid() );
- }
- private:
- BSONType type_;
- };
+protected:
+ virtual void modify(BSONObj& o) const = 0;
+};
- } // namespace Validation
+class CodeWScopeSmallSize : public CodeWScopeBase {
+ void modify(BSONObj& o) const {
+ set(o, 7, 7);
+ }
+};
- } // namespace BSONObjTests
+class CodeWScopeZeroStrSize : public CodeWScopeBase {
+ void modify(BSONObj& o) const {
+ set(o, 11, 0);
+ }
+};
- namespace OIDTests {
+class CodeWScopeSmallStrSize : public CodeWScopeBase {
+ void modify(BSONObj& o) const {
+ set(o, 11, 1);
+ }
+};
- class init1 {
- public:
- void run() {
- OID a;
- OID b;
+class CodeWScopeNoSizeForObj : public CodeWScopeBase {
+ void modify(BSONObj& o) const {
+ set(o, 7, 13);
+ }
+};
- a.init();
- b.init();
+class CodeWScopeSmallObjSize : public CodeWScopeBase {
+ void modify(BSONObj& o) const {
+ set(o, 17, 1);
+ }
+};
- ASSERT( a != b );
- }
- };
+class CodeWScopeBadObject : public CodeWScopeBase {
+ void modify(BSONObj& o) const {
+ set(o, 21, JSTypeMax + 1);
+ }
+};
+
+class NoSize {
+public:
+ NoSize(BSONType type) : type_(type) {}
+ void run() {
+ const char data[] = {0x07, 0x00, 0x00, 0x00, char(type_), 'a', 0x00};
+ BSONObj o(data);
+ ASSERT(!o.valid());
+ }
- class initParse1 {
- public:
- void run() {
+private:
+ BSONType type_;
+};
- OID a;
- OID b;
+} // namespace Validation
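
Every case in the Validation namespace follows the same pattern; a minimal standalone sketch of it, under the same assumptions (offsets follow the BSON layout: a 4-byte total length, then a 1-byte type tag before each element name):

    void corruptionSketch() {
        BSONObj good = fromjson("{\"a\":1}");
        ASSERT(good.valid());

        BSONObj bad = good.copy();
        // Offset 4 is the first element's type byte; 50 matches no BSONType.
        const_cast<char*>(bad.objdata())[4] = 50;
        ASSERT(!bad.valid());
    }
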
- a.init();
- b.init( a.toString() );
+} // namespace BSONObjTests
- ASSERT( a == b );
- }
- };
-
- class append {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendOID( "a" , 0 );
- b.appendOID( "b" , 0 , false );
- b.appendOID( "c" , 0 , true );
- BSONObj o = b.obj();
- keyTest(o);
-
- ASSERT( o["a"].__oid().toString() == "000000000000000000000000" );
- ASSERT( o["b"].__oid().toString() == "000000000000000000000000" );
- ASSERT( o["c"].__oid().toString() != "000000000000000000000000" );
+namespace OIDTests {
- }
- };
-
- class increasing {
- public:
- BSONObj g() {
- BSONObjBuilder b;
- b.appendOID( "_id" , 0 , true );
- return b.obj();
- }
- void run() {
- BSONObj a = g();
- BSONObj b = g();
-
- ASSERT( a.woCompare( b ) < 0 );
-
- // yes, there is a 1/1000 chance this won't increase time(0)
- // and therefore inaccurately say the function is behaving
- // buf if its broken, it will fail 999/1000, so i think that's good enough
- sleepsecs( 1 );
- BSONObj c = g();
- ASSERT( a.woCompare( c ) < 0 );
- }
- };
+class init1 {
+public:
+ void run() {
+ OID a;
+ OID b;
- class ToDate {
- public:
- void run() {
- OID oid;
- const Date_t base( ::time( 0 ) );
- oid.init( base );
+ a.init();
+ b.init();
- ASSERT_EQUALS( base.millis / 1000, oid.asDateT().millis / 1000 );
- ASSERT_EQUALS( base.toTimeT(), oid.asTimeT() );
- }
- };
-
- class FromDate {
- public:
- void run() {
- OID min, oid, max;
- Date_t now = jsTime();
- oid.init(); // slight chance this has different time. If its a problem, can change.
- min.init(now);
- max.init(now, true);
-
- ASSERT_EQUALS( (unsigned)oid.asTimeT() , now/1000 );
- ASSERT_EQUALS( (unsigned)min.asTimeT() , now/1000 );
- ASSERT_EQUALS( (unsigned)max.asTimeT() , now/1000 );
- ASSERT( BSON("" << min).woCompare( BSON("" << oid) ) < 0 );
- ASSERT( BSON("" << max).woCompare( BSON("" << oid) )> 0 );
- }
- };
+ ASSERT(a != b);
+ }
+};
- } // namespace OIDTests
+class initParse1 {
+public:
+ void run() {
+ OID a;
+ OID b;
+ a.init();
+ b.init(a.toString());
- namespace ValueStreamTests {
+ ASSERT(a == b);
+ }
+};
- class LabelBase {
- public:
- virtual ~LabelBase() {}
- void run() {
- ASSERT( !expected().woCompare( actual() ) );
- }
- protected:
- virtual BSONObj expected() = 0;
- virtual BSONObj actual() = 0;
- };
-
- class LabelBasic : public LabelBase {
- BSONObj expected() {
- return BSON( "a" << ( BSON( "$gt" << 1 ) ) );
- }
- BSONObj actual() {
- return BSON( "a" << GT << 1 );
- }
- };
+class append {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendOID("a", 0);
+ b.appendOID("b", 0, false);
+ b.appendOID("c", 0, true);
+ BSONObj o = b.obj();
+ keyTest(o);
+
+ ASSERT(o["a"].__oid().toString() == "000000000000000000000000");
+ ASSERT(o["b"].__oid().toString() == "000000000000000000000000");
+ ASSERT(o["c"].__oid().toString() != "000000000000000000000000");
+ }
+};
- class LabelShares : public LabelBase {
- BSONObj expected() {
- return BSON( "z" << "q" << "a" << ( BSON( "$gt" << 1 ) ) << "x" << "p" );
- }
- BSONObj actual() {
- return BSON( "z" << "q" << "a" << GT << 1 << "x" << "p" );
- }
- };
+class increasing {
+public:
+ BSONObj g() {
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ return b.obj();
+ }
+ void run() {
+ BSONObj a = g();
+ BSONObj b = g();
+
+ ASSERT(a.woCompare(b) < 0);
+
+ // yes, there is a 1/1000 chance this won't increase time(0)
+        // and therefore inaccurately report that the function is behaving,
+        // but if it's broken, it will fail 999 times out of 1000, so that's good enough
+ sleepsecs(1);
+ BSONObj c = g();
+ ASSERT(a.woCompare(c) < 0);
+ }
+};
- class LabelDouble : public LabelBase {
- BSONObj expected() {
- return BSON( "a" << ( BSON( "$gt" << 1 << "$lte" << "x" ) ) );
- }
- BSONObj actual() {
- return BSON( "a" << GT << 1 << LTE << "x" );
- }
- };
+class ToDate {
+public:
+ void run() {
+ OID oid;
+ const Date_t base(::time(0));
+ oid.init(base);
- class LabelDoubleShares : public LabelBase {
- BSONObj expected() {
- return BSON( "z" << "q" << "a" << ( BSON( "$gt" << 1 << "$lte" << "x" ) ) << "x" << "p" );
- }
- BSONObj actual() {
- return BSON( "z" << "q" << "a" << GT << 1 << LTE << "x" << "x" << "p" );
- }
- };
+ ASSERT_EQUALS(base.millis / 1000, oid.asDateT().millis / 1000);
+ ASSERT_EQUALS(base.toTimeT(), oid.asTimeT());
+ }
+};
+
+class FromDate {
+public:
+ void run() {
+ OID min, oid, max;
+ Date_t now = jsTime();
+        oid.init();  // slight chance this gets a different time; if it's a problem, we can change it.
+ min.init(now);
+ max.init(now, true);
+
+ ASSERT_EQUALS((unsigned)oid.asTimeT(), now / 1000);
+ ASSERT_EQUALS((unsigned)min.asTimeT(), now / 1000);
+ ASSERT_EQUALS((unsigned)max.asTimeT(), now / 1000);
+ ASSERT(BSON("" << min).woCompare(BSON("" << oid)) < 0);
+ ASSERT(BSON("" << max).woCompare(BSON("" << oid)) > 0);
+ }
+};
- class LabelSize : public LabelBase {
- BSONObj expected() {
- return BSON( "a" << BSON( "$size" << 4 ) );
- }
- BSONObj actual() {
- return BSON( "a" << mongo::BSIZE << 4 );
- }
- };
-
- class LabelMulti : public LabelBase {
- BSONObj expected() {
- return BSON( "z" << "q"
- << "a" << BSON( "$gt" << 1 << "$lte" << "x" )
- << "b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 )
- << "x" << "p" );
- }
- BSONObj actual() {
- return BSON( "z" << "q"
- << "a" << GT << 1 << LTE << "x"
- << "b" << NE << 1 << NE << "f" << NE << 22.3
- << "x" << "p" );
- }
- };
- class LabelishOr : public LabelBase {
- BSONObj expected() {
- return BSON( "$or" << BSON_ARRAY(
- BSON("a" << BSON( "$gt" << 1 << "$lte" << "x" ))
- << BSON("b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 ))
- << BSON("x" << "p" )));
- }
- BSONObj actual() {
- return OR( BSON( "a" << GT << 1 << LTE << "x"),
- BSON( "b" << NE << 1 << NE << "f" << NE << 22.3),
- BSON( "x" << "p" ) );
- }
- };
+} // namespace OIDTests
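
A minimal sketch pulling together the OID properties the namespace above checks, same assumptions as the earlier sketches:

    void oidSketch() {
        OID a, b;
        a.init();
        b.init();
        ASSERT(a != b);              // freshly generated OIDs are distinct

        OID parsed;
        parsed.init(a.toString());   // 24-char hex string round trip
        ASSERT(a == parsed);

        time_t t = a.asTimeT();      // creation time lives in the leading bytes
        ASSERT(t <= ::time(0));
    }
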
- class Unallowed {
- public:
- void run() {
- ASSERT_THROWS( BSON( GT << 4 ), MsgAssertionException );
- ASSERT_THROWS( BSON( "a" << 1 << GT << 4 ), MsgAssertionException );
- }
- };
-
- class ElementAppend {
- public:
- void run() {
- BSONObj a = BSON( "a" << 17 );
- BSONObj b = BSON( "b" << a["a"] );
- ASSERT_EQUALS( NumberInt , a["a"].type() );
- ASSERT_EQUALS( NumberInt , b["b"].type() );
- ASSERT_EQUALS( 17 , b["b"].number() );
- }
- };
- class AllTypes {
- public:
- void run() {
- // These are listed in order of BSONType
+namespace ValueStreamTests {
- ASSERT_EQUALS(objTypeOf(MINKEY), MinKey);
- ASSERT_EQUALS(arrTypeOf(MINKEY), MinKey);
+class LabelBase {
+public:
+ virtual ~LabelBase() {}
+ void run() {
+ ASSERT(!expected().woCompare(actual()));
+ }
- // EOO not valid in middle of BSONObj
+protected:
+ virtual BSONObj expected() = 0;
+ virtual BSONObj actual() = 0;
+};
- ASSERT_EQUALS(objTypeOf(1.0), NumberDouble);
- ASSERT_EQUALS(arrTypeOf(1.0), NumberDouble);
+class LabelBasic : public LabelBase {
+ BSONObj expected() {
+ return BSON("a" << (BSON("$gt" << 1)));
+ }
+ BSONObj actual() {
+ return BSON("a" << GT << 1);
+ }
+};
+
+class LabelShares : public LabelBase {
+ BSONObj expected() {
+ return BSON("z"
+ << "q"
+ << "a" << (BSON("$gt" << 1)) << "x"
+ << "p");
+ }
+ BSONObj actual() {
+ return BSON("z"
+ << "q"
+ << "a" << GT << 1 << "x"
+ << "p");
+ }
+};
- ASSERT_EQUALS(objTypeOf(""), String);
- ASSERT_EQUALS(arrTypeOf(""), String);
- ASSERT_EQUALS(objTypeOf(string()), String);
- ASSERT_EQUALS(arrTypeOf(string()), String);
- ASSERT_EQUALS(objTypeOf(StringData("")), String);
- ASSERT_EQUALS(arrTypeOf(StringData("")), String);
+class LabelDouble : public LabelBase {
+ BSONObj expected() {
+ return BSON("a" << (BSON("$gt" << 1 << "$lte"
+ << "x")));
+ }
+ BSONObj actual() {
+ return BSON("a" << GT << 1 << LTE << "x");
+ }
+};
+
+class LabelDoubleShares : public LabelBase {
+ BSONObj expected() {
+ return BSON("z"
+ << "q"
+ << "a" << (BSON("$gt" << 1 << "$lte"
+ << "x")) << "x"
+ << "p");
+ }
+ BSONObj actual() {
+ return BSON("z"
+ << "q"
+ << "a" << GT << 1 << LTE << "x"
+ << "x"
+ << "p");
+ }
+};
- ASSERT_EQUALS(objTypeOf(BSONObj()), Object);
- ASSERT_EQUALS(arrTypeOf(BSONObj()), Object);
+class LabelSize : public LabelBase {
+ BSONObj expected() {
+ return BSON("a" << BSON("$size" << 4));
+ }
+ BSONObj actual() {
+ return BSON("a" << mongo::BSIZE << 4);
+ }
+};
+
+class LabelMulti : public LabelBase {
+ BSONObj expected() {
+ return BSON("z"
+ << "q"
+ << "a" << BSON("$gt" << 1 << "$lte"
+ << "x") << "b" << BSON("$ne" << 1 << "$ne"
+ << "f"
+ << "$ne" << 22.3) << "x"
+ << "p");
+ }
+ BSONObj actual() {
+ return BSON("z"
+ << "q"
+ << "a" << GT << 1 << LTE << "x"
+ << "b" << NE << 1 << NE << "f" << NE << 22.3 << "x"
+ << "p");
+ }
+};
+class LabelishOr : public LabelBase {
+ BSONObj expected() {
+ return BSON("$or" << BSON_ARRAY(BSON("a" << BSON("$gt" << 1 << "$lte"
+ << "x"))
+ << BSON("b" << BSON("$ne" << 1 << "$ne"
+ << "f"
+ << "$ne" << 22.3))
+ << BSON("x"
+ << "p")));
+ }
+ BSONObj actual() {
+ return OR(BSON("a" << GT << 1 << LTE << "x"),
+ BSON("b" << NE << 1 << NE << "f" << NE << 22.3),
+ BSON("x"
+ << "p"));
+ }
+};
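
A minimal sketch of the label streaming the LabelBase cases verify, same assumptions; consecutive labels on one field fold into a single operator subobject:

    void labelSketch() {
        BSONObj q = BSON("age" << GT << 18 << LTE << 65);
        BSONObj expected = BSON("age" << BSON("$gt" << 18 << "$lte" << 65));
        ASSERT_EQUALS(expected, q);
    }
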
- ASSERT_EQUALS(objTypeOf(BSONArray()), Array);
- ASSERT_EQUALS(arrTypeOf(BSONArray()), Array);
+class Unallowed {
+public:
+ void run() {
+ ASSERT_THROWS(BSON(GT << 4), MsgAssertionException);
+ ASSERT_THROWS(BSON("a" << 1 << GT << 4), MsgAssertionException);
+ }
+};
+
+class ElementAppend {
+public:
+ void run() {
+ BSONObj a = BSON("a" << 17);
+ BSONObj b = BSON("b" << a["a"]);
+ ASSERT_EQUALS(NumberInt, a["a"].type());
+ ASSERT_EQUALS(NumberInt, b["b"].type());
+ ASSERT_EQUALS(17, b["b"].number());
+ }
+};
- ASSERT_EQUALS(objTypeOf(BSONBinData("", 0, BinDataGeneral)), BinData);
- ASSERT_EQUALS(arrTypeOf(BSONBinData("", 0, BinDataGeneral)), BinData);
+class AllTypes {
+public:
+ void run() {
+ // These are listed in order of BSONType
- ASSERT_EQUALS(objTypeOf(BSONUndefined), Undefined);
- ASSERT_EQUALS(arrTypeOf(BSONUndefined), Undefined);
+ ASSERT_EQUALS(objTypeOf(MINKEY), MinKey);
+ ASSERT_EQUALS(arrTypeOf(MINKEY), MinKey);
- ASSERT_EQUALS(objTypeOf(OID()), jstOID);
- ASSERT_EQUALS(arrTypeOf(OID()), jstOID);
+ // EOO not valid in middle of BSONObj
- ASSERT_EQUALS(objTypeOf(true), Bool);
- ASSERT_EQUALS(arrTypeOf(true), Bool);
+ ASSERT_EQUALS(objTypeOf(1.0), NumberDouble);
+ ASSERT_EQUALS(arrTypeOf(1.0), NumberDouble);
- ASSERT_EQUALS(objTypeOf(Date_t()), Date);
- ASSERT_EQUALS(arrTypeOf(Date_t()), Date);
+ ASSERT_EQUALS(objTypeOf(""), String);
+ ASSERT_EQUALS(arrTypeOf(""), String);
+ ASSERT_EQUALS(objTypeOf(string()), String);
+ ASSERT_EQUALS(arrTypeOf(string()), String);
+ ASSERT_EQUALS(objTypeOf(StringData("")), String);
+ ASSERT_EQUALS(arrTypeOf(StringData("")), String);
- ASSERT_EQUALS(objTypeOf(BSONNULL), jstNULL);
- ASSERT_EQUALS(arrTypeOf(BSONNULL), jstNULL);
+ ASSERT_EQUALS(objTypeOf(BSONObj()), Object);
+ ASSERT_EQUALS(arrTypeOf(BSONObj()), Object);
- ASSERT_EQUALS(objTypeOf(BSONRegEx("", "")), RegEx);
- ASSERT_EQUALS(arrTypeOf(BSONRegEx("", "")), RegEx);
+ ASSERT_EQUALS(objTypeOf(BSONArray()), Array);
+ ASSERT_EQUALS(arrTypeOf(BSONArray()), Array);
- ASSERT_EQUALS(objTypeOf(BSONDBRef("", OID())), DBRef);
- ASSERT_EQUALS(arrTypeOf(BSONDBRef("", OID())), DBRef);
+ ASSERT_EQUALS(objTypeOf(BSONBinData("", 0, BinDataGeneral)), BinData);
+ ASSERT_EQUALS(arrTypeOf(BSONBinData("", 0, BinDataGeneral)), BinData);
- ASSERT_EQUALS(objTypeOf(BSONCode("")), Code);
- ASSERT_EQUALS(arrTypeOf(BSONCode("")), Code);
+ ASSERT_EQUALS(objTypeOf(BSONUndefined), Undefined);
+ ASSERT_EQUALS(arrTypeOf(BSONUndefined), Undefined);
- ASSERT_EQUALS(objTypeOf(BSONSymbol("")), Symbol);
- ASSERT_EQUALS(arrTypeOf(BSONSymbol("")), Symbol);
+ ASSERT_EQUALS(objTypeOf(OID()), jstOID);
+ ASSERT_EQUALS(arrTypeOf(OID()), jstOID);
- ASSERT_EQUALS(objTypeOf(BSONCodeWScope("", BSONObj())), CodeWScope);
- ASSERT_EQUALS(arrTypeOf(BSONCodeWScope("", BSONObj())), CodeWScope);
+ ASSERT_EQUALS(objTypeOf(true), Bool);
+ ASSERT_EQUALS(arrTypeOf(true), Bool);
- ASSERT_EQUALS(objTypeOf(1), NumberInt);
- ASSERT_EQUALS(arrTypeOf(1), NumberInt);
+ ASSERT_EQUALS(objTypeOf(Date_t()), Date);
+ ASSERT_EQUALS(arrTypeOf(Date_t()), Date);
- ASSERT_EQUALS(objTypeOf(OpTime()), Timestamp);
- ASSERT_EQUALS(arrTypeOf(OpTime()), Timestamp);
+ ASSERT_EQUALS(objTypeOf(BSONNULL), jstNULL);
+ ASSERT_EQUALS(arrTypeOf(BSONNULL), jstNULL);
- ASSERT_EQUALS(objTypeOf(1LL), NumberLong);
- ASSERT_EQUALS(arrTypeOf(1LL), NumberLong);
+ ASSERT_EQUALS(objTypeOf(BSONRegEx("", "")), RegEx);
+ ASSERT_EQUALS(arrTypeOf(BSONRegEx("", "")), RegEx);
- ASSERT_EQUALS(objTypeOf(MAXKEY), MaxKey);
- ASSERT_EQUALS(arrTypeOf(MAXKEY), MaxKey);
- }
+ ASSERT_EQUALS(objTypeOf(BSONDBRef("", OID())), DBRef);
+ ASSERT_EQUALS(arrTypeOf(BSONDBRef("", OID())), DBRef);
- template<typename T>
- BSONType objTypeOf(const T& thing) {
- return BSON("" << thing).firstElement().type();
- }
+ ASSERT_EQUALS(objTypeOf(BSONCode("")), Code);
+ ASSERT_EQUALS(arrTypeOf(BSONCode("")), Code);
- template<typename T>
- BSONType arrTypeOf(const T& thing) {
- return BSON_ARRAY(thing).firstElement().type();
- }
- };
- } // namespace ValueStreamTests
-
- class SubObjectBuilder {
- public:
- void run() {
- BSONObjBuilder b1;
- b1.append( "a", "bcd" );
- BSONObjBuilder b2( b1.subobjStart( "foo" ) );
- b2.append( "ggg", 44.0 );
- b2.done();
- b1.append( "f", 10.0 );
- BSONObj ret = b1.done();
- ASSERT( ret.valid() );
- ASSERT( ret.woCompare( fromjson( "{a:'bcd',foo:{ggg:44},f:10}" ) ) == 0 );
- }
- };
+ ASSERT_EQUALS(objTypeOf(BSONSymbol("")), Symbol);
+ ASSERT_EQUALS(arrTypeOf(BSONSymbol("")), Symbol);
- class DateBuilder {
- public:
- void run() {
- BSONObj o = BSON("" << Date_t(1234567890));
- ASSERT( o.firstElement().type() == Date );
- ASSERT( o.firstElement().date() == Date_t(1234567890) );
- }
- };
+ ASSERT_EQUALS(objTypeOf(BSONCodeWScope("", BSONObj())), CodeWScope);
+ ASSERT_EQUALS(arrTypeOf(BSONCodeWScope("", BSONObj())), CodeWScope);
- class DateNowBuilder {
- public:
- void run() {
- Date_t before = jsTime();
- BSONObj o = BSON("now" << DATENOW);
- Date_t after = jsTime();
+ ASSERT_EQUALS(objTypeOf(1), NumberInt);
+ ASSERT_EQUALS(arrTypeOf(1), NumberInt);
- ASSERT( o.valid() );
+ ASSERT_EQUALS(objTypeOf(OpTime()), Timestamp);
+ ASSERT_EQUALS(arrTypeOf(OpTime()), Timestamp);
- BSONElement e = o["now"];
- ASSERT( e.type() == Date );
- ASSERT( e.date() >= before );
- ASSERT( e.date() <= after );
- }
- };
+ ASSERT_EQUALS(objTypeOf(1LL), NumberLong);
+ ASSERT_EQUALS(arrTypeOf(1LL), NumberLong);
- class TimeTBuilder {
- public:
- void run() {
- Date_t before = jsTime();
- sleepmillis(2);
- time_t now = jsTime().toTimeT();
- sleepmillis(2);
- Date_t after = jsTime();
+ ASSERT_EQUALS(objTypeOf(MAXKEY), MaxKey);
+ ASSERT_EQUALS(arrTypeOf(MAXKEY), MaxKey);
+ }
- BSONObjBuilder b;
- b.appendTimeT("now", now);
- BSONObj o = b.obj();
+ template <typename T>
+ BSONType objTypeOf(const T& thing) {
+ return BSON("" << thing).firstElement().type();
+ }
- ASSERT( o.valid() );
+ template <typename T>
+ BSONType arrTypeOf(const T& thing) {
+ return BSON_ARRAY(thing).firstElement().type();
+ }
+};
+} // namespace ValueStreamTests
+
+class SubObjectBuilder {
+public:
+ void run() {
+ BSONObjBuilder b1;
+ b1.append("a", "bcd");
+ BSONObjBuilder b2(b1.subobjStart("foo"));
+ b2.append("ggg", 44.0);
+ b2.done();
+ b1.append("f", 10.0);
+ BSONObj ret = b1.done();
+ ASSERT(ret.valid());
+ ASSERT(ret.woCompare(fromjson("{a:'bcd',foo:{ggg:44},f:10}")) == 0);
+ }
+};
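
A minimal sketch of the subobjStart() usage SubObjectBuilder relies on, same assumptions; the child builder writes directly into the parent's buffer and must be done() before the parent appends anything else:

    void subObjectSketch() {
        BSONObjBuilder parent;
        parent.append("a", 1);
        BSONObjBuilder child(parent.subobjStart("sub"));
        child.append("b", 2);
        child.done();              // closes {b: 2} inside the parent buffer
        parent.append("c", 3);
        BSONObj o = parent.obj();  // {a: 1, sub: {b: 2}, c: 3}
        ASSERT(o.valid());
    }
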
+
+class DateBuilder {
+public:
+ void run() {
+ BSONObj o = BSON("" << Date_t(1234567890));
+ ASSERT(o.firstElement().type() == Date);
+ ASSERT(o.firstElement().date() == Date_t(1234567890));
+ }
+};
- BSONElement e = o["now"];
- ASSERT( e.type() == Date );
- ASSERT( e.date()/1000 >= before/1000 );
- ASSERT( e.date()/1000 <= after/1000 );
- }
- };
+class DateNowBuilder {
+public:
+ void run() {
+ Date_t before = jsTime();
+ BSONObj o = BSON("now" << DATENOW);
+ Date_t after = jsTime();
- class MinMaxKeyBuilder {
- public:
- void run() {
- BSONObj min = BSON( "a" << MINKEY );
- BSONObj max = BSON( "b" << MAXKEY );
+ ASSERT(o.valid());
- ASSERT( min.valid() );
- ASSERT( max.valid() );
+ BSONElement e = o["now"];
+ ASSERT(e.type() == Date);
+ ASSERT(e.date() >= before);
+ ASSERT(e.date() <= after);
+ }
+};
- BSONElement minElement = min["a"];
- BSONElement maxElement = max["b"];
- ASSERT( minElement.type() == MinKey );
- ASSERT( maxElement.type() == MaxKey );
- }
- };
+class TimeTBuilder {
+public:
+ void run() {
+ Date_t before = jsTime();
+ sleepmillis(2);
+ time_t now = jsTime().toTimeT();
+ sleepmillis(2);
+ Date_t after = jsTime();
- class MinMaxElementTest {
- public:
+ BSONObjBuilder b;
+ b.appendTimeT("now", now);
+ BSONObj o = b.obj();
- BSONObj min( int t ) {
- BSONObjBuilder b;
- b.appendMinForType( "a" , t );
- return b.obj();
- }
+ ASSERT(o.valid());
- BSONObj max( int t ) {
- BSONObjBuilder b;
- b.appendMaxForType( "a" , t );
- return b.obj();
- }
+ BSONElement e = o["now"];
+ ASSERT(e.type() == Date);
+ ASSERT(e.date() / 1000 >= before / 1000);
+ ASSERT(e.date() / 1000 <= after / 1000);
+ }
+};
- void run() {
- for ( int t=1; t<JSTypeMax; t++ ) {
- stringstream ss;
- ss << "type: " << t;
- string s = ss.str();
- ASSERT( min( t ).woCompare( max( t ) ) <= 0 );
- ASSERT( max( t ).woCompare( min( t ) ) >= 0 );
- ASSERT( min( t ).woCompare( min( t ) ) == 0 );
- ASSERT( max( t ).woCompare( max( t ) ) == 0 );
- }
- }
- };
+class MinMaxKeyBuilder {
+public:
+ void run() {
+ BSONObj min = BSON("a" << MINKEY);
+ BSONObj max = BSON("b" << MAXKEY);
- class ExtractFieldsTest {
- public:
- void run() {
- BSONObj x = BSON( "a" << 10 << "b" << 11 );
- verify( BSON( "a" << 10 ).woCompare( x.extractFields( BSON( "a" << 1 ) ) ) == 0 );
- verify( BSON( "b" << 11 ).woCompare( x.extractFields( BSON( "b" << 1 ) ) ) == 0 );
- verify( x.woCompare( x.extractFields( BSON( "a" << 1 << "b" << 1 ) ) ) == 0 );
+ ASSERT(min.valid());
+ ASSERT(max.valid());
- verify( (string)"a" == x.extractFields( BSON( "a" << 1 << "c" << 1 ) ).firstElementFieldName() );
- }
- };
+ BSONElement minElement = min["a"];
+ BSONElement maxElement = max["b"];
+ ASSERT(minElement.type() == MinKey);
+ ASSERT(maxElement.type() == MaxKey);
+ }
+};
- class ComparatorTest {
- public:
- BSONObj one( string s ) {
- return BSON( "x" << s );
- }
- BSONObj two( string x , string y ) {
- BSONObjBuilder b;
- b.append( "x" , x );
- if ( y.size() )
- b.append( "y" , y );
- else
- b.appendNull( "y" );
- return b.obj();
- }
+class MinMaxElementTest {
+public:
+ BSONObj min(int t) {
+ BSONObjBuilder b;
+ b.appendMinForType("a", t);
+ return b.obj();
+ }
- void test( BSONObj order , BSONObj l , BSONObj r , bool wanted ) {
- BSONObjCmp c( order );
- bool got = c(l,r);
- if ( got == wanted )
- return;
- cout << " order: " << order << " l: " << l << "r: " << r << " wanted: " << wanted << " got: " << got << endl;
- }
+ BSONObj max(int t) {
+ BSONObjBuilder b;
+ b.appendMaxForType("a", t);
+ return b.obj();
+ }
- void lt( BSONObj order , BSONObj l , BSONObj r ) {
- test( order , l , r , 1 );
+ void run() {
+ for (int t = 1; t < JSTypeMax; t++) {
+ stringstream ss;
+ ss << "type: " << t;
+ string s = ss.str();
+ ASSERT(min(t).woCompare(max(t)) <= 0);
+ ASSERT(max(t).woCompare(min(t)) >= 0);
+ ASSERT(min(t).woCompare(min(t)) == 0);
+ ASSERT(max(t).woCompare(max(t)) == 0);
}
+ }
+};
- void run() {
- BSONObj s = BSON( "x" << 1 );
- BSONObj c = BSON( "x" << 1 << "y" << 1 );
- test( s , one( "A" ) , one( "B" ) , 1 );
- test( s , one( "B" ) , one( "A" ) , 0 );
-
- test( c , two( "A" , "A" ) , two( "A" , "B" ) , 1 );
- test( c , two( "A" , "A" ) , two( "B" , "A" ) , 1 );
- test( c , two( "B" , "A" ) , two( "A" , "B" ) , 0 );
-
- lt( c , one("A") , two( "A" , "A" ) );
- lt( c , one("A") , one( "B" ) );
- lt( c , two("A","") , two( "B" , "A" ) );
-
- lt( c , two("B","A") , two( "C" , "A" ) );
- lt( c , two("B","A") , one( "C" ) );
- lt( c , two("B","A") , two( "C" , "" ) );
+class ExtractFieldsTest {
+public:
+ void run() {
+ BSONObj x = BSON("a" << 10 << "b" << 11);
+ verify(BSON("a" << 10).woCompare(x.extractFields(BSON("a" << 1))) == 0);
+ verify(BSON("b" << 11).woCompare(x.extractFields(BSON("b" << 1))) == 0);
+ verify(x.woCompare(x.extractFields(BSON("a" << 1 << "b" << 1))) == 0);
- }
- };
+ verify((string) "a" == x.extractFields(BSON("a" << 1 << "c" << 1)).firstElementFieldName());
+ }
+};
- class CompatBSON {
- public:
+class ComparatorTest {
+public:
+ BSONObj one(string s) {
+ return BSON("x" << s);
+ }
+ BSONObj two(string x, string y) {
+ BSONObjBuilder b;
+ b.append("x", x);
+ if (y.size())
+ b.append("y", y);
+ else
+ b.appendNull("y");
+ return b.obj();
+ }
-#define JSONBSONTEST(j,s) ASSERT_EQUALS( fromjson( j ).objsize() , s );
-#define RAWBSONTEST(j,s) ASSERT_EQUALS( j.objsize() , s );
+ void test(BSONObj order, BSONObj l, BSONObj r, bool wanted) {
+ BSONObjCmp c(order);
+ bool got = c(l, r);
+ if (got == wanted)
+ return;
+        cout << " order: " << order << " l: " << l << " r: " << r << " wanted: " << wanted
+ << " got: " << got << endl;
+ }
- void run() {
+ void lt(BSONObj order, BSONObj l, BSONObj r) {
+ test(order, l, r, 1);
+ }
- JSONBSONTEST( "{ 'x' : true }" , 9 );
- JSONBSONTEST( "{ 'x' : null }" , 8 );
- JSONBSONTEST( "{ 'x' : 5.2 }" , 16 );
- JSONBSONTEST( "{ 'x' : 'eliot' }" , 18 );
- JSONBSONTEST( "{ 'x' : 5.2 , 'y' : 'truth' , 'z' : 1.1 }" , 40 );
- JSONBSONTEST( "{ 'a' : { 'b' : 1.1 } }" , 24 );
- JSONBSONTEST( "{ 'x' : 5.2 , 'y' : { 'a' : 'eliot' , b : true } , 'z' : null }" , 44 );
- JSONBSONTEST( "{ 'x' : 5.2 , 'y' : [ 'a' , 'eliot' , 'b' , true ] , 'z' : null }" , 62 );
+ void run() {
+ BSONObj s = BSON("x" << 1);
+ BSONObj c = BSON("x" << 1 << "y" << 1);
+ test(s, one("A"), one("B"), 1);
+ test(s, one("B"), one("A"), 0);
- RAWBSONTEST( BSON( "x" << 4 ) , 12 );
+ test(c, two("A", "A"), two("A", "B"), 1);
+ test(c, two("A", "A"), two("B", "A"), 1);
+ test(c, two("B", "A"), two("A", "B"), 0);
- }
- };
+ lt(c, one("A"), two("A", "A"));
+ lt(c, one("A"), one("B"));
+ lt(c, two("A", ""), two("B", "A"));
- class CompareDottedFieldNamesTest {
- public:
- void t( FieldCompareResult res , const string& l , const string& r ) {
- LexNumCmp cmp( true );
- ASSERT_EQUALS( res , compareDottedFieldNames( l , r , cmp ) );
- ASSERT_EQUALS( -1 * res , compareDottedFieldNames( r , l , cmp ) );
- }
+ lt(c, two("B", "A"), two("C", "A"));
+ lt(c, two("B", "A"), one("C"));
+ lt(c, two("B", "A"), two("C", ""));
+ }
+};
+
+class CompatBSON {
+public:
+#define JSONBSONTEST(j, s) ASSERT_EQUALS(fromjson(j).objsize(), s);
+#define RAWBSONTEST(j, s) ASSERT_EQUALS(j.objsize(), s);
+
+ void run() {
+ JSONBSONTEST("{ 'x' : true }", 9);
+ JSONBSONTEST("{ 'x' : null }", 8);
+ JSONBSONTEST("{ 'x' : 5.2 }", 16);
+ JSONBSONTEST("{ 'x' : 'eliot' }", 18);
+ JSONBSONTEST("{ 'x' : 5.2 , 'y' : 'truth' , 'z' : 1.1 }", 40);
+ JSONBSONTEST("{ 'a' : { 'b' : 1.1 } }", 24);
+ JSONBSONTEST("{ 'x' : 5.2 , 'y' : { 'a' : 'eliot' , b : true } , 'z' : null }", 44);
+ JSONBSONTEST("{ 'x' : 5.2 , 'y' : [ 'a' , 'eliot' , 'b' , true ] , 'z' : null }", 62);
+
+ RAWBSONTEST(BSON("x" << 4), 12);
+ }
+};
+
+class CompareDottedFieldNamesTest {
+public:
+ void t(FieldCompareResult res, const string& l, const string& r) {
+ LexNumCmp cmp(true);
+ ASSERT_EQUALS(res, compareDottedFieldNames(l, r, cmp));
+ ASSERT_EQUALS(-1 * res, compareDottedFieldNames(r, l, cmp));
+ }
- void run() {
- t( SAME , "x" , "x" );
- t( SAME , "x.a" , "x.a" );
- t( SAME , "x.4" , "x.4" );
- t( LEFT_BEFORE , "a" , "b" );
- t( RIGHT_BEFORE , "b" , "a" );
- t( LEFT_BEFORE , "x.04" , "x.4" );
+ void run() {
+ t(SAME, "x", "x");
+ t(SAME, "x.a", "x.a");
+ t(SAME, "x.4", "x.4");
+ t(LEFT_BEFORE, "a", "b");
+ t(RIGHT_BEFORE, "b", "a");
+ t(LEFT_BEFORE, "x.04", "x.4");
- t( LEFT_SUBFIELD , "a.x" , "a" );
- t( LEFT_SUBFIELD , "a.4" , "a" );
- }
- };
+ t(LEFT_SUBFIELD, "a.x", "a");
+ t(LEFT_SUBFIELD, "a.4", "a");
+ }
+};
+
+class CompareDottedArrayFieldNamesTest {
+public:
+ void t(FieldCompareResult res, const string& l, const string& r) {
+ LexNumCmp cmp(false); // Specify numeric comparison for array field names.
+ ASSERT_EQUALS(res, compareDottedFieldNames(l, r, cmp));
+ ASSERT_EQUALS(-1 * res, compareDottedFieldNames(r, l, cmp));
+ }
- class CompareDottedArrayFieldNamesTest {
- public:
- void t( FieldCompareResult res , const string& l , const string& r ) {
- LexNumCmp cmp( false ); // Specify numeric comparison for array field names.
- ASSERT_EQUALS( res , compareDottedFieldNames( l , r , cmp ) );
- ASSERT_EQUALS( -1 * res , compareDottedFieldNames( r , l , cmp ) );
- }
-
- void run() {
- t( SAME , "0" , "0" );
- t( SAME , "1" , "1" );
- t( SAME , "0.1" , "0.1" );
- t( SAME , "0.a" , "0.a" );
- t( LEFT_BEFORE , "0" , "1" );
- t( LEFT_BEFORE , "2" , "10" );
- t( RIGHT_BEFORE , "1" , "0" );
- t( RIGHT_BEFORE , "10" , "2" );
-
- t( LEFT_SUBFIELD , "5.4" , "5" );
- t( LEFT_SUBFIELD , "5.x" , "5" );
- }
- };
-
- struct NestedDottedConversions {
- void t(const BSONObj& nest, const BSONObj& dot) {
- ASSERT_EQUALS( nested2dotted(nest), dot);
- ASSERT_EQUALS( nest, dotted2nested(dot));
- }
+ void run() {
+ t(SAME, "0", "0");
+ t(SAME, "1", "1");
+ t(SAME, "0.1", "0.1");
+ t(SAME, "0.a", "0.a");
+ t(LEFT_BEFORE, "0", "1");
+ t(LEFT_BEFORE, "2", "10");
+ t(RIGHT_BEFORE, "1", "0");
+ t(RIGHT_BEFORE, "10", "2");
+
+ t(LEFT_SUBFIELD, "5.4", "5");
+ t(LEFT_SUBFIELD, "5.x", "5");
+ }
+};
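
A minimal sketch contrasting the two comparison modes the pair of tests above selects, same assumptions; the LexNumCmp constructor flag picks pure lexical ordering (true) or numeric ordering of digit runs (false):

    void fieldNameOrderSketch() {
        LexNumCmp lexical(true);
        LexNumCmp numeric(false);
        // Lexically '1' < '2', so "10" precedes "2"; numerically 2 < 10.
        ASSERT_EQUALS(LEFT_BEFORE, compareDottedFieldNames("10", "2", lexical));
        ASSERT_EQUALS(LEFT_BEFORE, compareDottedFieldNames("2", "10", numeric));
    }
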
- void run() {
- t( BSON("a" << BSON("b" << 1)), BSON("a.b" << 1) );
- t( BSON("a" << BSON("b" << 1 << "c" << 1)), BSON("a.b" << 1 << "a.c" << 1) );
- t( BSON("a" << BSON("b" << 1 << "c" << 1) << "d" << 1), BSON("a.b" << 1 << "a.c" << 1 << "d" << 1) );
- t( BSON("a" << BSON("b" << 1 << "c" << 1 << "e" << BSON("f" << 1)) << "d" << 1), BSON("a.b" << 1 << "a.c" << 1 << "a.e.f" << 1 << "d" << 1) );
- }
- };
+struct NestedDottedConversions {
+ void t(const BSONObj& nest, const BSONObj& dot) {
+ ASSERT_EQUALS(nested2dotted(nest), dot);
+ ASSERT_EQUALS(nest, dotted2nested(dot));
+ }
- struct BSONArrayBuilderTest {
- void run() {
- int i = 0;
- BSONObjBuilder objb;
- BSONArrayBuilder arrb;
+ void run() {
+ t(BSON("a" << BSON("b" << 1)), BSON("a.b" << 1));
+ t(BSON("a" << BSON("b" << 1 << "c" << 1)), BSON("a.b" << 1 << "a.c" << 1));
+ t(BSON("a" << BSON("b" << 1 << "c" << 1) << "d" << 1),
+ BSON("a.b" << 1 << "a.c" << 1 << "d" << 1));
+ t(BSON("a" << BSON("b" << 1 << "c" << 1 << "e" << BSON("f" << 1)) << "d" << 1),
+ BSON("a.b" << 1 << "a.c" << 1 << "a.e.f" << 1 << "d" << 1));
+ }
+};
- objb << objb.numStr(i++) << 100;
- arrb << 100;
+struct BSONArrayBuilderTest {
+ void run() {
+ int i = 0;
+ BSONObjBuilder objb;
+ BSONArrayBuilder arrb;
- objb << objb.numStr(i++) << 1.0;
- arrb << 1.0;
+ objb << objb.numStr(i++) << 100;
+ arrb << 100;
- objb << objb.numStr(i++) << "Hello";
- arrb << "Hello";
+ objb << objb.numStr(i++) << 1.0;
+ arrb << 1.0;
- objb << objb.numStr(i++) << string("World");
- arrb << string("World");
+ objb << objb.numStr(i++) << "Hello";
+ arrb << "Hello";
- objb << objb.numStr(i++) << BSON( "a" << 1 << "b" << "foo" );
- arrb << BSON( "a" << 1 << "b" << "foo" );
+ objb << objb.numStr(i++) << string("World");
+ arrb << string("World");
- objb << objb.numStr(i++) << BSON( "a" << 1)["a"];
- arrb << BSON( "a" << 1)["a"];
+ objb << objb.numStr(i++) << BSON("a" << 1 << "b"
+ << "foo");
+ arrb << BSON("a" << 1 << "b"
+ << "foo");
- OID oid;
- oid.init();
- objb << objb.numStr(i++) << oid;
- arrb << oid;
+ objb << objb.numStr(i++) << BSON("a" << 1)["a"];
+ arrb << BSON("a" << 1)["a"];
- objb.appendUndefined(objb.numStr(i++));
- arrb.appendUndefined();
+ OID oid;
+ oid.init();
+ objb << objb.numStr(i++) << oid;
+ arrb << oid;
- objb.appendRegex(objb.numStr(i++), "test", "imx");
- arrb.appendRegex("test", "imx");
+ objb.appendUndefined(objb.numStr(i++));
+ arrb.appendUndefined();
- objb.appendBinData(objb.numStr(i++), 4, BinDataGeneral, "wow");
- arrb.appendBinData(4, BinDataGeneral, "wow");
+ objb.appendRegex(objb.numStr(i++), "test", "imx");
+ arrb.appendRegex("test", "imx");
- objb.appendCode(objb.numStr(i++), "function(){ return 1; }");
- arrb.appendCode("function(){ return 1; }");
+ objb.appendBinData(objb.numStr(i++), 4, BinDataGeneral, "wow");
+ arrb.appendBinData(4, BinDataGeneral, "wow");
- objb.appendCodeWScope(objb.numStr(i++), "function(){ return a; }", BSON("a" << 1));
- arrb.appendCodeWScope("function(){ return a; }", BSON("a" << 1));
+ objb.appendCode(objb.numStr(i++), "function(){ return 1; }");
+ arrb.appendCode("function(){ return 1; }");
- time_t dt(0);
- objb.appendTimeT(objb.numStr(i++), dt);
- arrb.appendTimeT(dt);
+ objb.appendCodeWScope(objb.numStr(i++), "function(){ return a; }", BSON("a" << 1));
+ arrb.appendCodeWScope("function(){ return a; }", BSON("a" << 1));
- Date_t date(0);
- objb.appendDate(objb.numStr(i++), date);
- arrb.appendDate(date);
+ time_t dt(0);
+ objb.appendTimeT(objb.numStr(i++), dt);
+ arrb.appendTimeT(dt);
- objb.append(objb.numStr(i++), BSONRegEx("test2", "s"));
- arrb.append(BSONRegEx("test2", "s"));
+ Date_t date(0);
+ objb.appendDate(objb.numStr(i++), date);
+ arrb.appendDate(date);
- BSONObj obj = objb.obj();
- BSONArray arr = arrb.arr();
+ objb.append(objb.numStr(i++), BSONRegEx("test2", "s"));
+ arrb.append(BSONRegEx("test2", "s"));
- ASSERT_EQUALS(obj, arr);
+ BSONObj obj = objb.obj();
+ BSONArray arr = arrb.arr();
- BSONObj o = BSON( "obj" << obj << "arr" << arr << "arr2" << BSONArray(obj)
- << "regex" << BSONRegEx("reg", "x"));
- keyTest(o);
+ ASSERT_EQUALS(obj, arr);
- ASSERT_EQUALS(o["obj"].type(), Object);
- ASSERT_EQUALS(o["arr"].type(), Array);
- ASSERT_EQUALS(o["arr2"].type(), Array);
- ASSERT_EQUALS(o["regex"].type(), RegEx);
- }
- };
+ BSONObj o = BSON("obj" << obj << "arr" << arr << "arr2" << BSONArray(obj) << "regex"
+ << BSONRegEx("reg", "x"));
+ keyTest(o);
- struct ArrayMacroTest {
- void run() {
- BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
- BSONObj obj = BSON( "0" << "hello"
- << "1" << 1
- << "2" << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+ ASSERT_EQUALS(o["obj"].type(), Object);
+ ASSERT_EQUALS(o["arr"].type(), Array);
+ ASSERT_EQUALS(o["arr2"].type(), Array);
+ ASSERT_EQUALS(o["regex"].type(), RegEx);
+ }
+};
+
+struct ArrayMacroTest {
+ void run() {
+ BSONArray arr = BSON_ARRAY("hello" << 1 << BSON("foo" << BSON_ARRAY("bar"
+ << "baz"
+ << "qux")));
+ BSONObj obj = BSON("0"
+ << "hello"
+ << "1" << 1 << "2" << BSON("foo" << BSON_ARRAY("bar"
+ << "baz"
+ << "qux")));
+
+ ASSERT_EQUALS(arr, obj);
+ ASSERT_EQUALS(arr["2"].type(), Object);
+ ASSERT_EQUALS(arr["2"].embeddedObject()["foo"].type(), Array);
+ }
+};
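
A minimal sketch of the representation ArrayMacroTest asserts, same assumptions; a BSON array is an object whose field names are the decimal indexes "0", "1", ...:

    void arrayFieldNameSketch() {
        BSONArray arr = BSON_ARRAY(5 << "x" << 7.5);
        BSONObj obj = BSON("0" << 5 << "1" << "x" << "2" << 7.5);
        ASSERT_EQUALS(arr, obj);  // identical element layout
    }
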
- ASSERT_EQUALS(arr, obj);
- ASSERT_EQUALS(arr["2"].type(), Object);
- ASSERT_EQUALS(arr["2"].embeddedObject()["foo"].type(), Array);
- }
- };
+class NumberParsing {
+public:
+ void run() {
+ BSONObjBuilder a;
+ BSONObjBuilder b;
- class NumberParsing {
- public:
- void run() {
- BSONObjBuilder a;
- BSONObjBuilder b;
+ a.append("a", (int)1);
+ ASSERT(b.appendAsNumber("a", "1"));
- a.append( "a" , (int)1 );
- ASSERT( b.appendAsNumber( "a" , "1" ) );
+ a.append("b", 1.1);
+ ASSERT(b.appendAsNumber("b", "1.1"));
- a.append( "b" , 1.1 );
- ASSERT( b.appendAsNumber( "b" , "1.1" ) );
+ a.append("c", (int)-1);
+ ASSERT(b.appendAsNumber("c", "-1"));
- a.append( "c" , (int)-1 );
- ASSERT( b.appendAsNumber( "c" , "-1" ) );
+ a.append("d", -1.1);
+ ASSERT(b.appendAsNumber("d", "-1.1"));
- a.append( "d" , -1.1 );
- ASSERT( b.appendAsNumber( "d" , "-1.1" ) );
+ a.append("e", (long long)32131231231232313LL);
+ ASSERT(b.appendAsNumber("e", "32131231231232313"));
- a.append( "e" , (long long)32131231231232313LL );
- ASSERT( b.appendAsNumber( "e" , "32131231231232313" ) );
+ ASSERT(!b.appendAsNumber("f", "zz"));
+ ASSERT(!b.appendAsNumber("f", "5zz"));
+ ASSERT(!b.appendAsNumber("f", "zz5"));
- ASSERT( ! b.appendAsNumber( "f" , "zz" ) );
- ASSERT( ! b.appendAsNumber( "f" , "5zz" ) );
- ASSERT( ! b.appendAsNumber( "f" , "zz5" ) );
+ ASSERT_EQUALS(a.obj(), b.obj());
+ }
+};
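
A minimal sketch of the appendAsNumber() contract NumberParsing pins down, same assumptions; the call parses the string into a typed number and returns false, appending nothing, for non-numeric input:

    void appendAsNumberSketch() {
        BSONObjBuilder b;
        ASSERT(b.appendAsNumber("i", "42"));      // stored as NumberInt
        ASSERT(b.appendAsNumber("d", "-1.5"));    // stored as NumberDouble
        ASSERT(!b.appendAsNumber("bad", "5zz"));  // rejected
        ASSERT_EQUALS(BSON("i" << 42 << "d" << -1.5), b.obj());
    }
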
+
+class bson2settest {
+public:
+ void run() {
+ BSONObj o = BSON("z" << 1 << "a" << 2 << "m" << 3 << "c" << 4);
+ BSONObjIteratorSorted i(o);
+ stringstream ss;
+ while (i.more())
+ ss << i.next().fieldName();
+ ASSERT_EQUALS("acmz", ss.str());
- ASSERT_EQUALS( a.obj() , b.obj() );
+ {
+ Timer t;
+ for (int i = 0; i < 10000; i++) {
+ BSONObjIteratorSorted j(o);
+ int l = 0;
+ while (j.more())
+ l += strlen(j.next().fieldName());
+ }
+ // unsigned long long tm = t.micros();
+ // cout << "time: " << tm << endl;
}
- };
-
- class bson2settest {
- public:
- void run() {
- BSONObj o = BSON( "z" << 1 << "a" << 2 << "m" << 3 << "c" << 4 );
- BSONObjIteratorSorted i( o );
- stringstream ss;
- while ( i.more() )
- ss << i.next().fieldName();
- ASSERT_EQUALS( "acmz" , ss.str() );
- {
- Timer t;
- for ( int i=0; i<10000; i++ ) {
- BSONObjIteratorSorted j( o );
- int l = 0;
- while ( j.more() )
- l += strlen( j.next().fieldName() );
- }
- //unsigned long long tm = t.micros();
- //cout << "time: " << tm << endl;
- }
-
- BSONObj o2 = BSON( "2" << "a" << "11" << "b" );
- BSONObjIteratorSorted i2( o2 );
- // First field in sorted order should be "11" due use of a lexical comparison.
- ASSERT_EQUALS( "11", string( i2.next().fieldName() ) );
+ BSONObj o2 = BSON("2"
+ << "a"
+ << "11"
+ << "b");
+ BSONObjIteratorSorted i2(o2);
+        // First field in sorted order should be "11" due to the use of a lexical comparison.
+ ASSERT_EQUALS("11", string(i2.next().fieldName()));
+ }
+};
+
+class BSONArrayIteratorSorted {
+public:
+ void run() {
+ BSONArrayBuilder bab;
+ for (int i = 0; i < 11; ++i) {
+ bab << "a";
}
-
- };
-
- class BSONArrayIteratorSorted {
- public:
- void run() {
- BSONArrayBuilder bab;
- for( int i = 0; i < 11; ++i ) {
- bab << "a";
- }
- BSONArray arr = bab.arr();
- // The sorted iterator should perform numeric comparisons and return results in the same
- // order as the unsorted iterator.
- BSONObjIterator unsorted( arr );
- mongo::BSONArrayIteratorSorted sorted( arr );
- while( unsorted.more() ) {
- ASSERT( sorted.more() );
- ASSERT_EQUALS( string( unsorted.next().fieldName() ), sorted.next().fieldName() );
- }
- ASSERT( !sorted.more() );
+ BSONArray arr = bab.arr();
+ // The sorted iterator should perform numeric comparisons and return results in the same
+ // order as the unsorted iterator.
+ BSONObjIterator unsorted(arr);
+ mongo::BSONArrayIteratorSorted sorted(arr);
+ while (unsorted.more()) {
+ ASSERT(sorted.more());
+ ASSERT_EQUALS(string(unsorted.next().fieldName()), sorted.next().fieldName());
}
- };
-
- class checkForStorageTests {
- public:
+ ASSERT(!sorted.more());
+ }
+};
- void good( string s ) {
- good( fromjson( s ) );
- }
+class checkForStorageTests {
+public:
+ void good(string s) {
+ good(fromjson(s));
+ }
- void good( BSONObj o ) {
- if ( o.okForStorageAsRoot() )
- return;
- throw UserException( 12528 , (string)"should be ok for storage:" + o.toString() );
- }
+ void good(BSONObj o) {
+ if (o.okForStorageAsRoot())
+ return;
+ throw UserException(12528, (string) "should be ok for storage:" + o.toString());
+ }
- void bad( string s ) {
- bad( fromjson( s ) );
- }
+ void bad(string s) {
+ bad(fromjson(s));
+ }
- void bad( BSONObj o ) {
- if ( ! o.okForStorageAsRoot() )
- return;
- throw UserException( 12529 , (string)"should NOT be ok for storage:" + o.toString() );
- }
+ void bad(BSONObj o) {
+ if (!o.okForStorageAsRoot())
+ return;
+ throw UserException(12529, (string) "should NOT be ok for storage:" + o.toString());
+ }
- void run() {
- // basic docs are good
- good( "{}" );
- good( "{x:1}" );
- good( "{x:{a:2}}" );
-
- // no dots allowed
- bad( "{'x.y':1}" );
- bad( "{'x\\.y':1}" );
-
- // Check for $
- bad( "{x:{'$a':2}}" );
- good( "{'a$b':2}" );
- good( "{'a$': {b: 2}}" );
- good( "{'a$':2}" );
- good( "{'a $ a': 'foo'}" );
-
- // Queries are not ok
- bad( "{num: {$gt: 1}}" );
- bad( "{_id: {$regex:'test'}}" );
- bad( "{$gt: 2}" );
- bad( "{a : { oo: [ {$bad:1}, {good:1}] }}");
- good( "{a : { oo: [ {'\\\\$good':1}, {good:1}] }}");
-
- // DBRef stuff -- json parser can't handle this yet
- good( BSON("a" << BSON("$ref" << "coll" << "$id" << 1)) );
- good( BSON("a" << BSON("$ref" << "coll" << "$id" << 1 << "$db" << "a")) );
- good( BSON("a" << BSON("$ref" << "coll" << "$id" << 1 << "stuff" << 1)) );
- good( BSON("a" << BSON("$ref" << "coll" << "$id" << 1 << "$db" <<
- "a" << "stuff" << 1)) );
-
- bad( BSON("a" << BSON("$ref" << 1 << "$id" << 1)) );
- bad( BSON("a" << BSON("$ref" << 1 << "$id" << 1 << "$db" << "a")) );
- bad( BSON("a" << BSON("$ref" << "coll" << "$id" << 1 << "$db" << 1)) );
- bad( BSON("a" << BSON("$ref" << "coll")) );
- bad( BSON("a" << BSON("$ref" << "coll" << "$db" << "db")) );
- bad( BSON("a" << BSON("$id" << 1)) );
- bad( BSON("a" << BSON("$id" << 1 << "$ref" << "coll")) );
- bad( BSON("a" << BSON("$ref" << "coll" << "$id" << 1 << "$hater" << 1)) );
- bad( BSON("a" << BSON("$ref" << "coll" << "$id" << 1 << "dot.dot" << 1)) );
-
- // _id isn't a RegEx, or Array
- good( "{_id: 0}" );
- good( "{_id: {a:1, b:1}}" );
- good( "{_id: {rx: /a/}}" );
- good( "{_id: {rx: {$regex: 'a'}}}" );
- bad( "{_id: /a/ }" );
- bad( "{_id: /a/, other:1}" );
- bad( "{hi:1, _id: /a/ }" );
- bad( "{_id: /a/i }" );
- bad( "{first:/f/i, _id: /a/i }" );
- //Not really a regex type
- bad( "{_id: {$regex: 'a'} }" );
- bad( "{_id: {$regex: 'a', $options:'i'} }" );
- bad( "{_id: [1,2]}" );
- bad( "{_id: [1]}" );
+ void run() {
+ // basic docs are good
+ good("{}");
+ good("{x:1}");
+ good("{x:{a:2}}");
+
+ // no dots allowed
+ bad("{'x.y':1}");
+ bad("{'x\\.y':1}");
+
+ // Check for $
+ bad("{x:{'$a':2}}");
+ good("{'a$b':2}");
+ good("{'a$': {b: 2}}");
+ good("{'a$':2}");
+ good("{'a $ a': 'foo'}");
+
+ // Queries are not ok
+ bad("{num: {$gt: 1}}");
+ bad("{_id: {$regex:'test'}}");
+ bad("{$gt: 2}");
+ bad("{a : { oo: [ {$bad:1}, {good:1}] }}");
+ good("{a : { oo: [ {'\\\\$good':1}, {good:1}] }}");
+
+ // DBRef stuff -- json parser can't handle this yet
+ good(BSON("a" << BSON("$ref"
+ << "coll"
+ << "$id" << 1)));
+ good(BSON("a" << BSON("$ref"
+ << "coll"
+ << "$id" << 1 << "$db"
+ << "a")));
+ good(BSON("a" << BSON("$ref"
+ << "coll"
+ << "$id" << 1 << "stuff" << 1)));
+ good(BSON("a" << BSON("$ref"
+ << "coll"
+ << "$id" << 1 << "$db"
+ << "a"
+ << "stuff" << 1)));
+
+ bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1)));
+ bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1 << "$db"
+ << "a")));
+ bad(BSON("a" << BSON("$ref"
+ << "coll"
+ << "$id" << 1 << "$db" << 1)));
+ bad(BSON("a" << BSON("$ref"
+ << "coll")));
+ bad(BSON("a" << BSON("$ref"
+ << "coll"
+ << "$db"
+ << "db")));
+ bad(BSON("a" << BSON("$id" << 1)));
+ bad(BSON("a" << BSON("$id" << 1 << "$ref"
+ << "coll")));
+ bad(BSON("a" << BSON("$ref"
+ << "coll"
+ << "$id" << 1 << "$hater" << 1)));
+ bad(BSON("a" << BSON("$ref"
+ << "coll"
+ << "$id" << 1 << "dot.dot" << 1)));
+
+ // _id isn't a RegEx, or Array
+ good("{_id: 0}");
+ good("{_id: {a:1, b:1}}");
+ good("{_id: {rx: /a/}}");
+ good("{_id: {rx: {$regex: 'a'}}}");
+ bad("{_id: /a/ }");
+ bad("{_id: /a/, other:1}");
+ bad("{hi:1, _id: /a/ }");
+ bad("{_id: /a/i }");
+ bad("{first:/f/i, _id: /a/i }");
+ // Not really a regex type
+ bad("{_id: {$regex: 'a'} }");
+ bad("{_id: {$regex: 'a', $options:'i'} }");
+ bad("{_id: [1,2]}");
+ bad("{_id: [1]}");
+ }
+};
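
A minimal sketch of the rule the good/bad table above encodes, same assumptions; okForStorageAsRoot() rejects dotted field names and "$"-prefixed operators, with a carve-out for well-formed DBRef subobjects:

    void storageCheckSketch() {
        ASSERT(fromjson("{x: 1}").okForStorageAsRoot());
        ASSERT(!fromjson("{'x.y': 1}").okForStorageAsRoot());       // dotted name
        ASSERT(!fromjson("{num: {$gt: 1}}").okForStorageAsRoot());  // query operator
    }
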
+class InvalidIDFind {
+public:
+ void run() {
+ BSONObj x = BSON("_id" << 5 << "t" << 2);
+ {
+ char* crap = (char*)mongoMalloc(x.objsize());
+ memcpy(crap, x.objdata(), x.objsize());
+ BSONObj y(crap);
+ ASSERT_EQUALS(x, y);
+ free(crap);
}
- };
-
- class InvalidIDFind {
- public:
- void run() {
- BSONObj x = BSON( "_id" << 5 << "t" << 2 );
- {
- char * crap = (char*)mongoMalloc( x.objsize() );
- memcpy( crap , x.objdata() , x.objsize() );
- BSONObj y( crap );
- ASSERT_EQUALS( x , y );
- free( crap );
- }
-
- {
- char * crap = (char*)mongoMalloc( x.objsize() );
- memcpy( crap , x.objdata() , x.objsize() );
- int * foo = (int*)crap;
- foo[0] = 123123123;
- int state = 0;
- try {
- BSONObj y( crap );
- state = 1;
- }
- catch ( std::exception& e ) {
- state = 2;
- ASSERT( strstr( e.what() , "_id: 5" ) != NULL );
- }
- free( crap );
- ASSERT_EQUALS( 2 , state );
- }
-
+ {
+ char* crap = (char*)mongoMalloc(x.objsize());
+ memcpy(crap, x.objdata(), x.objsize());
+ int* foo = (int*)crap;
+ foo[0] = 123123123;
+ int state = 0;
+ try {
+ BSONObj y(crap);
+ state = 1;
+ } catch (std::exception& e) {
+ state = 2;
+ ASSERT(strstr(e.what(), "_id: 5") != NULL);
+ }
+ free(crap);
+ ASSERT_EQUALS(2, state);
}
- };
+ }
+};
- class ElementSetTest {
- public:
- void run() {
- BSONObj x = BSON( "a" << 1 << "b" << 1 << "c" << 2 );
- BSONElement a = x["a"];
- BSONElement b = x["b"];
- BSONElement c = x["c"];
- //cout << "c: " << c << endl;
- ASSERT( a.woCompare( b ) != 0 );
- ASSERT( a.woCompare( b , false ) == 0 );
+class ElementSetTest {
+public:
+ void run() {
+ BSONObj x = BSON("a" << 1 << "b" << 1 << "c" << 2);
+ BSONElement a = x["a"];
+ BSONElement b = x["b"];
+ BSONElement c = x["c"];
+ // cout << "c: " << c << endl;
+ ASSERT(a.woCompare(b) != 0);
+ ASSERT(a.woCompare(b, false) == 0);
- BSONElementSet s;
- s.insert( a );
- ASSERT_EQUALS( 1U , s.size() );
- s.insert( b );
- ASSERT_EQUALS( 1U , s.size() );
- ASSERT( ! s.count( c ) );
+ BSONElementSet s;
+ s.insert(a);
+ ASSERT_EQUALS(1U, s.size());
+ s.insert(b);
+ ASSERT_EQUALS(1U, s.size());
+ ASSERT(!s.count(c));
- ASSERT( s.find( a ) != s.end() );
- ASSERT( s.find( b ) != s.end() );
- ASSERT( s.find( c ) == s.end() );
+ ASSERT(s.find(a) != s.end());
+ ASSERT(s.find(b) != s.end());
+ ASSERT(s.find(c) == s.end());
- s.insert( c );
- ASSERT_EQUALS( 2U , s.size() );
+ s.insert(c);
+ ASSERT_EQUALS(2U, s.size());
- ASSERT( s.find( a ) != s.end() );
- ASSERT( s.find( b ) != s.end() );
- ASSERT( s.find( c ) != s.end() );
+ ASSERT(s.find(a) != s.end());
+ ASSERT(s.find(b) != s.end());
+ ASSERT(s.find(c) != s.end());
- ASSERT( s.count( a ) );
- ASSERT( s.count( b ) );
- ASSERT( s.count( c ) );
+ ASSERT(s.count(a));
+ ASSERT(s.count(b));
+ ASSERT(s.count(c));
- {
- BSONElementSet x;
- BSONObj o = fromjson( "{ 'a' : [ 1 , 2 , 1 ] }" );
- BSONObjIterator i( o["a"].embeddedObjectUserCheck() );
- while ( i.more() ) {
- x.insert( i.next() );
- }
- ASSERT_EQUALS( 2U , x.size() );
- }
- }
- };
-
- class EmbeddedNumbers {
- public:
- void run() {
- BSONObj x = BSON( "a" << BSON( "b" << 1 ) );
- BSONObj y = BSON( "a" << BSON( "b" << 1.0 ) );
- keyTest(x); keyTest(y);
- ASSERT_EQUALS( x , y );
- ASSERT_EQUALS( 0 , x.woCompare( y ) );
- }
- };
-
- class BuilderPartialItearte {
- public:
- void run() {
- {
- BSONObjBuilder b;
- b.append( "x" , 1 );
- b.append( "y" , 2 );
-
- BSONObjIterator i = b.iterator();
- ASSERT( i.more() );
- ASSERT_EQUALS( 1 , i.next().numberInt() );
- ASSERT( i.more() );
- ASSERT_EQUALS( 2 , i.next().numberInt() );
- ASSERT( ! i.more() );
-
- b.append( "z" , 3 );
-
- i = b.iterator();
- ASSERT( i.more() );
- ASSERT_EQUALS( 1 , i.next().numberInt() );
- ASSERT( i.more() );
- ASSERT_EQUALS( 2 , i.next().numberInt() );
- ASSERT( i.more() );
- ASSERT_EQUALS( 3 , i.next().numberInt() );
- ASSERT( ! i.more() );
-
- ASSERT_EQUALS( BSON( "x" << 1 << "y" << 2 << "z" << 3 ) , b.obj() );
+ {
+ BSONElementSet x;
+ BSONObj o = fromjson("{ 'a' : [ 1 , 2 , 1 ] }");
+ BSONObjIterator i(o["a"].embeddedObjectUserCheck());
+ while (i.more()) {
+ x.insert(i.next());
}
-
+ ASSERT_EQUALS(2U, x.size());
}
- };
-
- class BSONForEachTest {
- public:
- void run() {
- BSONObj obj = BSON("a" << 1 << "a" << 2 << "a" << 3);
-
- int count = 0;
- BSONForEach(e, obj) {
- ASSERT_EQUALS( e.fieldName() , string("a") );
- count += e.Int();
- }
+ }
+};
+
+class EmbeddedNumbers {
+public:
+ void run() {
+ BSONObj x = BSON("a" << BSON("b" << 1));
+ BSONObj y = BSON("a" << BSON("b" << 1.0));
+ keyTest(x);
+ keyTest(y);
+ ASSERT_EQUALS(x, y);
+ ASSERT_EQUALS(0, x.woCompare(y));
+ }
+};
- ASSERT_EQUALS( count , 1+2+3 );
+class BuilderPartialItearte {
+public:
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.append("x", 1);
+ b.append("y", 2);
+
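+ // iterator() walks what has been appended so far without finalizing the
+ // builder, so more fields can still be added afterwards.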
+ BSONObjIterator i = b.iterator();
+ ASSERT(i.more());
+ ASSERT_EQUALS(1, i.next().numberInt());
+ ASSERT(i.more());
+ ASSERT_EQUALS(2, i.next().numberInt());
+ ASSERT(!i.more());
+
+ b.append("z", 3);
+
+ i = b.iterator();
+ ASSERT(i.more());
+ ASSERT_EQUALS(1, i.next().numberInt());
+ ASSERT(i.more());
+ ASSERT_EQUALS(2, i.next().numberInt());
+ ASSERT(i.more());
+ ASSERT_EQUALS(3, i.next().numberInt());
+ ASSERT(!i.more());
+
+ ASSERT_EQUALS(BSON("x" << 1 << "y" << 2 << "z" << 3), b.obj());
}
- };
-
- class CompareOps {
- public:
- void run() {
-
- BSONObj a = BSON("a"<<1);
- BSONObj b = BSON("a"<<1);
- BSONObj c = BSON("a"<<2);
- BSONObj d = BSON("a"<<3);
- BSONObj e = BSON("a"<<4);
- BSONObj f = BSON("a"<<4);
+ }
+};
- ASSERT( ! ( a < b ) );
- ASSERT( a <= b );
- ASSERT( a < c );
+class BSONForEachTest {
+public:
+ void run() {
+ BSONObj obj = BSON("a" << 1 << "a" << 2 << "a" << 3);
- ASSERT( f > d );
- ASSERT( f >= e );
- ASSERT( ! ( f > e ) );
+ int count = 0;
+ BSONForEach(e, obj) {
+ ASSERT_EQUALS(e.fieldName(), string("a"));
+ count += e.Int();
}
- };
- class NestedBuilderOversize {
- public:
- void run() {
- try {
- BSONObjBuilder outer;
- BSONObjBuilder inner(outer.subobjStart("inner"));
+ ASSERT_EQUALS(count, 1 + 2 + 3);
+ }
+};
+
+class CompareOps {
+public:
+ void run() {
+ BSONObj a = BSON("a" << 1);
+ BSONObj b = BSON("a" << 1);
+ BSONObj c = BSON("a" << 2);
+ BSONObj d = BSON("a" << 3);
+ BSONObj e = BSON("a" << 4);
+ BSONObj f = BSON("a" << 4);
+
+ ASSERT(!(a < b));
+ ASSERT(a <= b);
+ ASSERT(a < c);
+
+ ASSERT(f > d);
+ ASSERT(f >= e);
+ ASSERT(!(f > e));
+ }
+};
- string bigStr(1000, 'x');
- while (true) {
- ASSERT_LESS_THAN_OR_EQUALS(inner.len(), BufferMaxSize);
- inner.append("", bigStr);
- }
+class NestedBuilderOversize {
+public:
+ void run() {
+ try {
+ BSONObjBuilder outer;
+ BSONObjBuilder inner(outer.subobjStart("inner"));
- ASSERT(!"Expected Throw");
- } catch (const DBException& e) {
- if (e.getCode() != 13548) // we expect the code for oversized buffer
- throw;
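+ // Keep appending 1000-byte strings; once the shared buffer would exceed
+ // BufferMaxSize the builder is expected to throw DBException 13548.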
+ string bigStr(1000, 'x');
+ while (true) {
+ ASSERT_LESS_THAN_OR_EQUALS(inner.len(), BufferMaxSize);
+ inner.append("", bigStr);
}
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "jsobj" ) {
- }
- void setupTests() {
- add< BufBuilderBasic >();
- add< BufBuilderReallocLimit >();
- add< BSONElementBasic >();
- add< BSONObjTests::NullString >();
- add< BSONObjTests::Create >();
- add< BSONObjTests::WoCompareBasic >();
- add< BSONObjTests::NumericCompareBasic >();
- add< BSONObjTests::WoCompareEmbeddedObject >();
- add< BSONObjTests::WoCompareEmbeddedArray >();
- add< BSONObjTests::WoCompareOrdered >();
- add< BSONObjTests::WoCompareDifferentLength >();
- add< BSONObjTests::WoSortOrder >();
- add< BSONObjTests::IsPrefixOf >();
- add< BSONObjTests::MultiKeySortOrder > ();
- add< BSONObjTests::Nan >();
- add< BSONObjTests::AsTempObj >();
- add< BSONObjTests::AppendIntOrLL >();
- add< BSONObjTests::AppendNumber >();
- add< BSONObjTests::ToStringArray >();
- add< BSONObjTests::ToStringNumber >();
- add< BSONObjTests::AppendAs >();
- add< BSONObjTests::GetField >();
- add< BSONObjTests::ToStringRecursionDepth >();
- add< BSONObjTests::StringWithNull >();
-
- add< BSONObjTests::Validation::BadType >();
- add< BSONObjTests::Validation::EooBeforeEnd >();
- add< BSONObjTests::Validation::Undefined >();
- add< BSONObjTests::Validation::TotalSizeTooSmall >();
- add< BSONObjTests::Validation::EooMissing >();
- add< BSONObjTests::Validation::WrongStringSize >();
- add< BSONObjTests::Validation::ZeroStringSize >();
- add< BSONObjTests::Validation::NegativeStringSize >();
- add< BSONObjTests::Validation::WrongSubobjectSize >();
- add< BSONObjTests::Validation::WrongDbrefNsSize >();
- add< BSONObjTests::Validation::NoFieldNameEnd >();
- add< BSONObjTests::Validation::BadRegex >();
- add< BSONObjTests::Validation::BadRegexOptions >();
- add< BSONObjTests::Validation::CodeWScopeSmallSize >();
- add< BSONObjTests::Validation::CodeWScopeZeroStrSize >();
- add< BSONObjTests::Validation::CodeWScopeSmallStrSize >();
- add< BSONObjTests::Validation::CodeWScopeNoSizeForObj >();
- add< BSONObjTests::Validation::CodeWScopeSmallObjSize >();
- add< BSONObjTests::Validation::CodeWScopeBadObject >();
- add< BSONObjTests::Validation::NoSize >( Symbol );
- add< BSONObjTests::Validation::NoSize >( Code );
- add< BSONObjTests::Validation::NoSize >( String );
- add< BSONObjTests::Validation::NoSize >( CodeWScope );
- add< BSONObjTests::Validation::NoSize >( DBRef );
- add< BSONObjTests::Validation::NoSize >( Object );
- add< BSONObjTests::Validation::NoSize >( Array );
- add< BSONObjTests::Validation::NoSize >( BinData );
- add< OIDTests::init1 >();
- add< OIDTests::initParse1 >();
- add< OIDTests::append >();
- add< OIDTests::increasing >();
- add< OIDTests::ToDate >();
- add< OIDTests::FromDate >();
- add< ValueStreamTests::LabelBasic >();
- add< ValueStreamTests::LabelShares >();
- add< ValueStreamTests::LabelDouble >();
- add< ValueStreamTests::LabelDoubleShares >();
- add< ValueStreamTests::LabelSize >();
- add< ValueStreamTests::LabelMulti >();
- add< ValueStreamTests::LabelishOr >();
- add< ValueStreamTests::Unallowed >();
- add< ValueStreamTests::ElementAppend >();
- add< ValueStreamTests::AllTypes >();
- add< SubObjectBuilder >();
- add< DateBuilder >();
- add< DateNowBuilder >();
- add< TimeTBuilder >();
- add< MinMaxKeyBuilder >();
- add< MinMaxElementTest >();
- add< ComparatorTest >();
- add< ExtractFieldsTest >();
- add< CompatBSON >();
- add< CompareDottedFieldNamesTest >();
- add< CompareDottedArrayFieldNamesTest >();
- add< NestedDottedConversions >();
- add< BSONArrayBuilderTest >();
- add< ArrayMacroTest >();
- add< NumberParsing >();
- add< bson2settest >();
- add< BSONArrayIteratorSorted >();
- add< checkForStorageTests >();
- add< InvalidIDFind >();
- add< ElementSetTest >();
- add< EmbeddedNumbers >();
- add< BuilderPartialItearte >();
- add< BSONForEachTest >();
- add< CompareOps >();
- add< NestedBuilderOversize >();
+ ASSERT(!"Expected Throw");
+ } catch (const DBException& e) {
+ if (e.getCode() != 13548) // we expect the code for oversized buffer
+ throw;
}
- };
-
- SuiteInstance<All> myall;
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("jsobj") {}
+
+ void setupTests() {
+ add<BufBuilderBasic>();
+ add<BufBuilderReallocLimit>();
+ add<BSONElementBasic>();
+ add<BSONObjTests::NullString>();
+ add<BSONObjTests::Create>();
+ add<BSONObjTests::WoCompareBasic>();
+ add<BSONObjTests::NumericCompareBasic>();
+ add<BSONObjTests::WoCompareEmbeddedObject>();
+ add<BSONObjTests::WoCompareEmbeddedArray>();
+ add<BSONObjTests::WoCompareOrdered>();
+ add<BSONObjTests::WoCompareDifferentLength>();
+ add<BSONObjTests::WoSortOrder>();
+ add<BSONObjTests::IsPrefixOf>();
+ add<BSONObjTests::MultiKeySortOrder>();
+ add<BSONObjTests::Nan>();
+ add<BSONObjTests::AsTempObj>();
+ add<BSONObjTests::AppendIntOrLL>();
+ add<BSONObjTests::AppendNumber>();
+ add<BSONObjTests::ToStringArray>();
+ add<BSONObjTests::ToStringNumber>();
+ add<BSONObjTests::AppendAs>();
+ add<BSONObjTests::GetField>();
+ add<BSONObjTests::ToStringRecursionDepth>();
+ add<BSONObjTests::StringWithNull>();
+
+ add<BSONObjTests::Validation::BadType>();
+ add<BSONObjTests::Validation::EooBeforeEnd>();
+ add<BSONObjTests::Validation::Undefined>();
+ add<BSONObjTests::Validation::TotalSizeTooSmall>();
+ add<BSONObjTests::Validation::EooMissing>();
+ add<BSONObjTests::Validation::WrongStringSize>();
+ add<BSONObjTests::Validation::ZeroStringSize>();
+ add<BSONObjTests::Validation::NegativeStringSize>();
+ add<BSONObjTests::Validation::WrongSubobjectSize>();
+ add<BSONObjTests::Validation::WrongDbrefNsSize>();
+ add<BSONObjTests::Validation::NoFieldNameEnd>();
+ add<BSONObjTests::Validation::BadRegex>();
+ add<BSONObjTests::Validation::BadRegexOptions>();
+ add<BSONObjTests::Validation::CodeWScopeSmallSize>();
+ add<BSONObjTests::Validation::CodeWScopeZeroStrSize>();
+ add<BSONObjTests::Validation::CodeWScopeSmallStrSize>();
+ add<BSONObjTests::Validation::CodeWScopeNoSizeForObj>();
+ add<BSONObjTests::Validation::CodeWScopeSmallObjSize>();
+ add<BSONObjTests::Validation::CodeWScopeBadObject>();
+ add<BSONObjTests::Validation::NoSize>(Symbol);
+ add<BSONObjTests::Validation::NoSize>(Code);
+ add<BSONObjTests::Validation::NoSize>(String);
+ add<BSONObjTests::Validation::NoSize>(CodeWScope);
+ add<BSONObjTests::Validation::NoSize>(DBRef);
+ add<BSONObjTests::Validation::NoSize>(Object);
+ add<BSONObjTests::Validation::NoSize>(Array);
+ add<BSONObjTests::Validation::NoSize>(BinData);
+ add<OIDTests::init1>();
+ add<OIDTests::initParse1>();
+ add<OIDTests::append>();
+ add<OIDTests::increasing>();
+ add<OIDTests::ToDate>();
+ add<OIDTests::FromDate>();
+ add<ValueStreamTests::LabelBasic>();
+ add<ValueStreamTests::LabelShares>();
+ add<ValueStreamTests::LabelDouble>();
+ add<ValueStreamTests::LabelDoubleShares>();
+ add<ValueStreamTests::LabelSize>();
+ add<ValueStreamTests::LabelMulti>();
+ add<ValueStreamTests::LabelishOr>();
+ add<ValueStreamTests::Unallowed>();
+ add<ValueStreamTests::ElementAppend>();
+ add<ValueStreamTests::AllTypes>();
+ add<SubObjectBuilder>();
+ add<DateBuilder>();
+ add<DateNowBuilder>();
+ add<TimeTBuilder>();
+ add<MinMaxKeyBuilder>();
+ add<MinMaxElementTest>();
+ add<ComparatorTest>();
+ add<ExtractFieldsTest>();
+ add<CompatBSON>();
+ add<CompareDottedFieldNamesTest>();
+ add<CompareDottedArrayFieldNamesTest>();
+ add<NestedDottedConversions>();
+ add<BSONArrayBuilderTest>();
+ add<ArrayMacroTest>();
+ add<NumberParsing>();
+ add<bson2settest>();
+ add<BSONArrayIteratorSorted>();
+ add<checkForStorageTests>();
+ add<InvalidIDFind>();
+ add<ElementSetTest>();
+ add<EmbeddedNumbers>();
+ add<BuilderPartialItearte>();
+ add<BSONForEachTest>();
+ add<CompareOps>();
+ add<NestedBuilderOversize>();
+ }
+};
-} // namespace JsobjTests
+SuiteInstance<All> myall;
+} // namespace JsobjTests
diff --git a/src/mongo/dbtests/jsontests.cpp b/src/mongo/dbtests/jsontests.cpp
index 7d407d7ecd0..1ce749d9988 100644
--- a/src/mongo/dbtests/jsontests.cpp
+++ b/src/mongo/dbtests/jsontests.cpp
@@ -43,2905 +43,2902 @@
namespace JsonTests {
- using std::cout;
- using std::endl;
- using std::numeric_limits;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- namespace JsonStringTests {
-
- class Empty {
- public:
- void run() {
- ASSERT_EQUALS( "{}", BSONObj().jsonString( Strict ) );
- }
- };
-
- class SingleStringMember {
- public:
- void run() {
- ASSERT_EQUALS( "{ \"a\" : \"b\" }", BSON( "a" << "b" ).jsonString( Strict ) );
- }
- };
-
- class EscapedCharacters {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", "\" \\ / \b \f \n \r \t" );
- ASSERT_EQUALS( "{ \"a\" : \"\\\" \\\\ / \\b \\f \\n \\r \\t\" }", b.done().jsonString( Strict ) );
- }
- };
-
- // per http://www.ietf.org/rfc/rfc4627.txt, control characters are
- // (U+0000 through U+001F). U+007F is not mentioned as a control character.
- class AdditionalControlCharacters {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", "\x1 \x1f" );
- ASSERT_EQUALS( "{ \"a\" : \"\\u0001 \\u001f\" }", b.done().jsonString( Strict ) );
- }
- };
-
- class ExtendedAscii {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", "\x80" );
- ASSERT_EQUALS( "{ \"a\" : \"\x80\" }", b.done().jsonString( Strict ) );
- }
- };
-
- class EscapeFieldName {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "\t", "b" );
- ASSERT_EQUALS( "{ \"\\t\" : \"b\" }", b.done().jsonString( Strict ) );
- }
- };
-
- class SingleIntMember {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", 1 );
- ASSERT_EQUALS( "{ \"a\" : 1 }", b.done().jsonString( Strict ) );
- }
- };
-
- class SingleNumberMember {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", 1.5 );
- ASSERT_EQUALS( "{ \"a\" : 1.5 }", b.done().jsonString( Strict ) );
- }
- };
-
- class InvalidNumbers {
- public:
- void run() {
- BSONObjBuilder c;
- c.append( "a", numeric_limits< double >::quiet_NaN() );
- string s = c.done().jsonString( Strict );
- // Note there is no NaN in the JSON RFC but what would be the alternative?
- ASSERT( str::contains(s, "NaN") );
-
- // commented out assertion as it doesn't throw anymore:
- //ASSERT_THROWS( c.done().jsonString( Strict ), AssertionException );
-
- BSONObjBuilder d;
- d.append( "a", numeric_limits< double >::signaling_NaN() );
- //ASSERT_THROWS( d.done().jsonString( Strict ), AssertionException );
- s = d.done().jsonString( Strict );
- ASSERT( str::contains(s, "NaN") );
- }
- };
-
- class NumberPrecision {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", 123456789 );
- ASSERT_EQUALS( "{ \"a\" : 123456789 }", b.done().jsonString( Strict ) );
- }
- };
-
- class NegativeNumber {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", -1 );
- ASSERT_EQUALS( "{ \"a\" : -1 }", b.done().jsonString( Strict ) );
- }
- };
-
- class NumberLongStrict {
- public:
- void run() {
- BSONObjBuilder b;
- b.append("a", 20000LL);
- ASSERT_EQUALS("{ \"a\" : { \"$numberLong\" : \"20000\" } }",
- b.done().jsonString(Strict));
- }
- };
-
- // Test a NumberLong that is too big to fit into a 32 bit integer
- class NumberLongStrictLarge {
- public:
- void run() {
- BSONObjBuilder b;
- b.append("a", 9223372036854775807LL);
- ASSERT_EQUALS("{ \"a\" : { \"$numberLong\" : \"9223372036854775807\" } }",
- b.done().jsonString(Strict));
- }
- };
-
- class NumberLongStrictNegative {
- public:
- void run() {
- BSONObjBuilder b;
- b.append("a", -20000LL);
- ASSERT_EQUALS("{ \"a\" : { \"$numberLong\" : \"-20000\" } }",
- b.done().jsonString(Strict));
- }
- };
-
- class NumberDoubleNaN {
- public:
- void run() {
- BSONObjBuilder b;
- b.append("a", std::numeric_limits<double>::quiet_NaN());
- ASSERT_EQUALS("{ \"a\" : NaN }", b.done().jsonString(Strict));
- }
- };
-
- class NumberDoubleInfinity {
- public:
- void run() {
- BSONObjBuilder b;
- b.append("a", std::numeric_limits<double>::infinity());
- ASSERT_EQUALS("{ \"a\" : Infinity }", b.done().jsonString(Strict));
- }
- };
-
- class NumberDoubleNegativeInfinity {
- public:
- void run() {
- BSONObjBuilder b;
- b.append("a", -std::numeric_limits<double>::infinity());
- ASSERT_EQUALS("{ \"a\" : -Infinity }", b.done().jsonString(Strict));
- }
- };
-
- class SingleBoolMember {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendBool( "a", true );
- ASSERT_EQUALS( "{ \"a\" : true }", b.done().jsonString( Strict ) );
-
- BSONObjBuilder c;
- c.appendBool( "a", false );
- ASSERT_EQUALS( "{ \"a\" : false }", c.done().jsonString( Strict ) );
- }
- };
-
- class SingleNullMember {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendNull( "a" );
- ASSERT_EQUALS( "{ \"a\" : null }", b.done().jsonString( Strict ) );
- }
- };
-
- class SingleUndefinedMember {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendUndefined( "a" );
- ASSERT_EQUALS( "{ \"a\" : { \"$undefined\" : true } }", b.done().jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : undefined }", b.done().jsonString( JS ) );
- ASSERT_EQUALS( "{ \"a\" : undefined }", b.done().jsonString( TenGen ) );
- }
- };
-
- class SingleObjectMember {
- public:
- void run() {
- BSONObjBuilder b, c;
- b.append( "a", c.done() );
- ASSERT_EQUALS( "{ \"a\" : {} }", b.done().jsonString( Strict ) );
- }
- };
-
- class TwoMembers {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", 1 );
- b.append( "b", 2 );
- ASSERT_EQUALS( "{ \"a\" : 1, \"b\" : 2 }", b.done().jsonString( Strict ) );
- }
- };
-
- class EmptyArray {
- public:
- void run() {
- vector< int > arr;
- BSONObjBuilder b;
- b.append( "a", arr );
- ASSERT_EQUALS( "{ \"a\" : [] }", b.done().jsonString( Strict ) );
- }
- };
-
- class Array {
- public:
- void run() {
- vector< int > arr;
- arr.push_back( 1 );
- arr.push_back( 2 );
- BSONObjBuilder b;
- b.append( "a", arr );
- ASSERT_EQUALS( "{ \"a\" : [ 1, 2 ] }", b.done().jsonString( Strict ) );
- }
- };
-
- class DBRef {
- public:
- void run() {
- char OIDbytes[OID::kOIDSize];
- memset( &OIDbytes, 0xff, OID::kOIDSize );
- OID oid = OID::from(OIDbytes);
- BSONObjBuilder b;
- b.appendDBRef( "a", "namespace", oid );
- BSONObj built = b.done();
- ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
- built.jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
- built.jsonString( JS ) );
- ASSERT_EQUALS( "{ \"a\" : Dbref( \"namespace\", \"ffffffffffffffffffffffff\" ) }",
- built.jsonString( TenGen ) );
- }
- };
-
- class DBRefZero {
- public:
- void run() {
- char OIDbytes[OID::kOIDSize];
- memset( &OIDbytes, 0, OID::kOIDSize );
- OID oid = OID::from(OIDbytes);
- BSONObjBuilder b;
- b.appendDBRef( "a", "namespace", oid );
- ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"000000000000000000000000\" } }",
- b.done().jsonString( Strict ) );
- }
- };
-
- class ObjectId {
- public:
- void run() {
- char OIDbytes[OID::kOIDSize];
- memset( &OIDbytes, 0xff, OID::kOIDSize );
- OID oid = OID::from(OIDbytes);
- BSONObjBuilder b;
- b.appendOID( "a", &oid );
- BSONObj built = b.done();
- ASSERT_EQUALS( "{ \"a\" : { \"$oid\" : \"ffffffffffffffffffffffff\" } }",
- built.jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : ObjectId( \"ffffffffffffffffffffffff\" ) }",
- built.jsonString( TenGen ) );
- }
- };
-
- class BinData {
- public:
- void run() {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, BinDataGeneral, z );
-
- string o = b.done().jsonString( Strict );
-
- ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"00\" } }",
- o );
-
- BSONObjBuilder c;
- c.appendBinData( "a", 2, BinDataGeneral, z );
- ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"00\" } }",
- c.done().jsonString( Strict ) );
-
- BSONObjBuilder d;
- d.appendBinData( "a", 1, BinDataGeneral, z );
- ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"00\" } }",
- d.done().jsonString( Strict ) );
- }
- };
-
- class Symbol {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendSymbol( "a", "b" );
- ASSERT_EQUALS( "{ \"a\" : \"b\" }", b.done().jsonString( Strict ) );
- }
- };
+using std::cout;
+using std::endl;
+using std::numeric_limits;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+namespace JsonStringTests {
+
+class Empty {
+public:
+ void run() {
+ ASSERT_EQUALS("{}", BSONObj().jsonString(Strict));
+ }
+};
+
+class SingleStringMember {
+public:
+ void run() {
+ ASSERT_EQUALS("{ \"a\" : \"b\" }",
+ BSON("a"
+ << "b").jsonString(Strict));
+ }
+};
+
+class EscapedCharacters {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", "\" \\ / \b \f \n \r \t");
+ ASSERT_EQUALS("{ \"a\" : \"\\\" \\\\ / \\b \\f \\n \\r \\t\" }",
+ b.done().jsonString(Strict));
+ }
+};
+
+// Per http://www.ietf.org/rfc/rfc4627.txt, the control characters are
+// U+0000 through U+001F; U+007F is not mentioned as a control character.
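+// Accordingly, the test below expects \u-escapes for U+0001 and U+001F, while
+// ExtendedAscii (next) expects bytes at U+0080 and above to pass through
+// untouched.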
+class AdditionalControlCharacters {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", "\x1 \x1f");
+ ASSERT_EQUALS("{ \"a\" : \"\\u0001 \\u001f\" }", b.done().jsonString(Strict));
+ }
+};
+
+class ExtendedAscii {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", "\x80");
+ ASSERT_EQUALS("{ \"a\" : \"\x80\" }", b.done().jsonString(Strict));
+ }
+};
+
+class EscapeFieldName {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("\t", "b");
+ ASSERT_EQUALS("{ \"\\t\" : \"b\" }", b.done().jsonString(Strict));
+ }
+};
+
+class SingleIntMember {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ ASSERT_EQUALS("{ \"a\" : 1 }", b.done().jsonString(Strict));
+ }
+};
+
+class SingleNumberMember {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", 1.5);
+ ASSERT_EQUALS("{ \"a\" : 1.5 }", b.done().jsonString(Strict));
+ }
+};
+
+class InvalidNumbers {
+public:
+ void run() {
+ BSONObjBuilder c;
+ c.append("a", numeric_limits<double>::quiet_NaN());
+ string s = c.done().jsonString(Strict);
+ // Note: the JSON RFC defines no NaN literal, but what would the alternative be?
+ ASSERT(str::contains(s, "NaN"));
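+ // Strict mode emits a bare NaN token (pinned down by NumberDoubleNaN
+ // below), which a conforming JSON parser would reject.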
+
+ // Assertion commented out because jsonString() no longer throws here:
+ // ASSERT_THROWS( c.done().jsonString( Strict ), AssertionException );
+
+ BSONObjBuilder d;
+ d.append("a", numeric_limits<double>::signaling_NaN());
+ // ASSERT_THROWS( d.done().jsonString( Strict ), AssertionException );
+ s = d.done().jsonString(Strict);
+ ASSERT(str::contains(s, "NaN"));
+ }
+};
+
+class NumberPrecision {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", 123456789);
+ ASSERT_EQUALS("{ \"a\" : 123456789 }", b.done().jsonString(Strict));
+ }
+};
+
+class NegativeNumber {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", -1);
+ ASSERT_EQUALS("{ \"a\" : -1 }", b.done().jsonString(Strict));
+ }
+};
+
+class NumberLongStrict {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", 20000LL);
+ ASSERT_EQUALS("{ \"a\" : { \"$numberLong\" : \"20000\" } }", b.done().jsonString(Strict));
+ }
+};
+
+// Test a NumberLong that is too big to fit into a 32-bit integer
+class NumberLongStrictLarge {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", 9223372036854775807LL);
+ ASSERT_EQUALS("{ \"a\" : { \"$numberLong\" : \"9223372036854775807\" } }",
+ b.done().jsonString(Strict));
+ }
+};
+
+class NumberLongStrictNegative {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", -20000LL);
+ ASSERT_EQUALS("{ \"a\" : { \"$numberLong\" : \"-20000\" } }", b.done().jsonString(Strict));
+ }
+};
+
+class NumberDoubleNaN {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", std::numeric_limits<double>::quiet_NaN());
+ ASSERT_EQUALS("{ \"a\" : NaN }", b.done().jsonString(Strict));
+ }
+};
+
+class NumberDoubleInfinity {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", std::numeric_limits<double>::infinity());
+ ASSERT_EQUALS("{ \"a\" : Infinity }", b.done().jsonString(Strict));
+ }
+};
+
+class NumberDoubleNegativeInfinity {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", -std::numeric_limits<double>::infinity());
+ ASSERT_EQUALS("{ \"a\" : -Infinity }", b.done().jsonString(Strict));
+ }
+};
+
+class SingleBoolMember {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendBool("a", true);
+ ASSERT_EQUALS("{ \"a\" : true }", b.done().jsonString(Strict));
+
+ BSONObjBuilder c;
+ c.appendBool("a", false);
+ ASSERT_EQUALS("{ \"a\" : false }", c.done().jsonString(Strict));
+ }
+};
+
+class SingleNullMember {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendNull("a");
+ ASSERT_EQUALS("{ \"a\" : null }", b.done().jsonString(Strict));
+ }
+};
+
+class SingleUndefinedMember {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendUndefined("a");
+ ASSERT_EQUALS("{ \"a\" : { \"$undefined\" : true } }", b.done().jsonString(Strict));
+ ASSERT_EQUALS("{ \"a\" : undefined }", b.done().jsonString(JS));
+ ASSERT_EQUALS("{ \"a\" : undefined }", b.done().jsonString(TenGen));
+ }
+};
+
+class SingleObjectMember {
+public:
+ void run() {
+ BSONObjBuilder b, c;
+ b.append("a", c.done());
+ ASSERT_EQUALS("{ \"a\" : {} }", b.done().jsonString(Strict));
+ }
+};
+
+class TwoMembers {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ b.append("b", 2);
+ ASSERT_EQUALS("{ \"a\" : 1, \"b\" : 2 }", b.done().jsonString(Strict));
+ }
+};
+
+class EmptyArray {
+public:
+ void run() {
+ vector<int> arr;
+ BSONObjBuilder b;
+ b.append("a", arr);
+ ASSERT_EQUALS("{ \"a\" : [] }", b.done().jsonString(Strict));
+ }
+};
+
+class Array {
+public:
+ void run() {
+ vector<int> arr;
+ arr.push_back(1);
+ arr.push_back(2);
+ BSONObjBuilder b;
+ b.append("a", arr);
+ ASSERT_EQUALS("{ \"a\" : [ 1, 2 ] }", b.done().jsonString(Strict));
+ }
+};
+
+class DBRef {
+public:
+ void run() {
+ char OIDbytes[OID::kOIDSize];
+ memset(&OIDbytes, 0xff, OID::kOIDSize);
+ OID oid = OID::from(OIDbytes);
+ BSONObjBuilder b;
+ b.appendDBRef("a", "namespace", oid);
+ BSONObj built = b.done();
+ ASSERT_EQUALS(
+ "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
+ built.jsonString(Strict));
+ ASSERT_EQUALS(
+ "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
+ built.jsonString(JS));
+ ASSERT_EQUALS("{ \"a\" : Dbref( \"namespace\", \"ffffffffffffffffffffffff\" ) }",
+ built.jsonString(TenGen));
+ }
+};
+
+class DBRefZero {
+public:
+ void run() {
+ char OIDbytes[OID::kOIDSize];
+ memset(&OIDbytes, 0, OID::kOIDSize);
+ OID oid = OID::from(OIDbytes);
+ BSONObjBuilder b;
+ b.appendDBRef("a", "namespace", oid);
+ ASSERT_EQUALS(
+ "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"000000000000000000000000\" } }",
+ b.done().jsonString(Strict));
+ }
+};
+
+class ObjectId {
+public:
+ void run() {
+ char OIDbytes[OID::kOIDSize];
+ memset(&OIDbytes, 0xff, OID::kOIDSize);
+ OID oid = OID::from(OIDbytes);
+ BSONObjBuilder b;
+ b.appendOID("a", &oid);
+ BSONObj built = b.done();
+ ASSERT_EQUALS("{ \"a\" : { \"$oid\" : \"ffffffffffffffffffffffff\" } }",
+ built.jsonString(Strict));
+ ASSERT_EQUALS("{ \"a\" : ObjectId( \"ffffffffffffffffffffffff\" ) }",
+ built.jsonString(TenGen));
+ }
+};
+
+class BinData {
+public:
+ void run() {
+ char z[3];
+ z[0] = 'a';
+ z[1] = 'b';
+ z[2] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData("a", 3, BinDataGeneral, z);
+
+ string o = b.done().jsonString(Strict);
+
+ ASSERT_EQUALS("{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"00\" } }", o);
+
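+ // Shorter payloads exercise base64 '=' padding: 2 bytes -> "YWI=",
+ // 1 byte -> "YQ==".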
+ BSONObjBuilder c;
+ c.appendBinData("a", 2, BinDataGeneral, z);
+ ASSERT_EQUALS("{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"00\" } }",
+ c.done().jsonString(Strict));
+
+ BSONObjBuilder d;
+ d.appendBinData("a", 1, BinDataGeneral, z);
+ ASSERT_EQUALS("{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"00\" } }",
+ d.done().jsonString(Strict));
+ }
+};
+
+class Symbol {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendSymbol("a", "b");
+ ASSERT_EQUALS("{ \"a\" : \"b\" }", b.done().jsonString(Strict));
+ }
+};
#ifdef _WIN32
- char tzEnvString[] = "TZ=EST+5EDT";
+char tzEnvString[] = "TZ=EST+5EDT";
#else
- char tzEnvString[] = "TZ=America/New_York";
+char tzEnvString[] = "TZ=America/New_York";
#endif
- class Date {
- public:
- Date() {
- char *_oldTimezonePtr = getenv("TZ");
- _oldTimezone = std::string(_oldTimezonePtr ? _oldTimezonePtr : "");
- if (-1 == putenv(tzEnvString)) {
- FAIL(errnoWithDescription());
- }
- tzset();
- }
- ~Date() {
- if (!_oldTimezone.empty()) {
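+// Pin TZ to US Eastern so formatted dates come out deterministically (the
+// epoch, for example, renders as 1969-12-31T19:00:00.000-0500).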
+class Date {
+public:
+ Date() {
+ char* _oldTimezonePtr = getenv("TZ");
+ _oldTimezone = std::string(_oldTimezonePtr ? _oldTimezonePtr : "");
+ if (-1 == putenv(tzEnvString)) {
+ FAIL(errnoWithDescription());
+ }
+ tzset();
+ }
+ ~Date() {
+ if (!_oldTimezone.empty()) {
#ifdef _WIN32
- errno_t ret = _putenv_s("TZ", _oldTimezone.c_str());
- if (0 != ret) {
- StringBuilder sb;
- sb << "Error setting TZ environment variable to: " << _oldTimezone
- << ". Error code: " << ret;
- FAIL(sb.str());
- }
+ errno_t ret = _putenv_s("TZ", _oldTimezone.c_str());
+ if (0 != ret) {
+ StringBuilder sb;
+ sb << "Error setting TZ environment variable to: " << _oldTimezone
+ << ". Error code: " << ret;
+ FAIL(sb.str());
+ }
#else
- if (-1 == setenv("TZ", _oldTimezone.c_str(), 1)) {
- FAIL(errnoWithDescription());
- }
+ if (-1 == setenv("TZ", _oldTimezone.c_str(), 1)) {
+ FAIL(errnoWithDescription());
+ }
#endif
- }
- else {
+ } else {
#ifdef _WIN32
- errno_t ret = _putenv_s("TZ", "");
- if (0 != ret) {
- StringBuilder sb;
- sb << "Error unsetting TZ environment variable. Error code: " << ret;
- FAIL(sb.str());
- }
-#else
- if (-1 == unsetenv("TZ")) {
- FAIL(errnoWithDescription());
- }
-#endif
- }
- tzset();
- }
-
- void run() {
- BSONObjBuilder b;
- b.appendDate( "a", 0 );
- BSONObj built = b.done();
- ASSERT_EQUALS( "{ \"a\" : { \"$date\" : \"1969-12-31T19:00:00.000-0500\" } }",
- built.jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", built.jsonString( TenGen ) );
- ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", built.jsonString( JS ) );
-
- // Test dates above our maximum formattable date. See SERVER-13760.
- BSONObjBuilder b2;
- b2.appendDate("a", 32535262800000ULL);
- BSONObj built2 = b2.done();
- ASSERT_EQUALS(
- "{ \"a\" : { \"$date\" : { \"$numberLong\" : \"32535262800000\" } } }",
- built2.jsonString( Strict ) );
- }
-
- private:
- std::string _oldTimezone;
-
- };
-
- class DateNegative {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendDate( "a", -1 );
- BSONObj built = b.done();
- ASSERT_EQUALS( "{ \"a\" : { \"$date\" : { \"$numberLong\" : \"-1\" } } }",
- built.jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : Date( -1 ) }", built.jsonString( TenGen ) );
- ASSERT_EQUALS( "{ \"a\" : Date( -1 ) }", built.jsonString( JS ) );
- }
- };
-
- class Regex {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendRegex( "a", "abc", "i" );
- BSONObj built = b.done();
- ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"abc\", \"$options\" : \"i\" } }",
- built.jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : /abc/i }", built.jsonString( TenGen ) );
- ASSERT_EQUALS( "{ \"a\" : /abc/i }", built.jsonString( JS ) );
- }
- };
-
- class RegexEscape {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendRegex( "a", "/\"", "i" );
- BSONObj built = b.done();
- ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"/\\\"\", \"$options\" : \"i\" } }",
- built.jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( TenGen ) );
- ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( JS ) );
- }
- };
-
- class RegexManyOptions {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendRegex( "a", "z", "abcgimx" );
- BSONObj built = b.done();
- ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"z\", \"$options\" : \"abcgimx\" } }",
- built.jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : /z/gim }", built.jsonString( TenGen ) );
- ASSERT_EQUALS( "{ \"a\" : /z/gim }", built.jsonString( JS ) );
- }
- };
-
- class CodeTests {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendCode( "x" , "function(arg){ var string = \"\\n\"; return 1; }" );
- BSONObj o = b.obj();
- ASSERT_EQUALS( "{ \"x\" : \"function(arg){ var string = \\\"\\\\n\\\"; "
- "return 1; }\" }" , o.jsonString() );
- }
- };
-
- class CodeWScopeTests {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendCodeWScope( "x" , "function(arg){ var string = \"\\n\"; return x; }" ,
- BSON("x" << 1 ) );
- BSONObj o = b.obj();
- ASSERT_EQUALS( "{ \"x\" : "
- "{ \"$code\" : "
- "\"function(arg){ var string = \\\"\\\\n\\\"; return x; }\" , "
- "\"$scope\" : { \"x\" : 1 } } }" ,
- o.jsonString() );
- }
- };
-
- class TimestampTests {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendTimestamp( "x" , 4000 , 10 );
- BSONObj o = b.obj();
- ASSERT_EQUALS( "{ \"x\" : { \"$timestamp\" : { \"t\" : 4, \"i\" : 10 } } }",
- o.jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"x\" : { \"$timestamp\" : { \"t\" : 4, \"i\" : 10 } } }",
- o.jsonString( JS ) );
- ASSERT_EQUALS( "{ \"x\" : Timestamp( 4, 10 ) }", o.jsonString( TenGen ) );
- }
- };
-
- class NullString {
- public:
- void run() {
- BSONObjBuilder b;
- b.append( "x" , "a\0b" , 4 );
- BSONObj o = b.obj();
- ASSERT_EQUALS( "{ \"x\" : \"a\\u0000b\" }" , o.jsonString() );
- }
- };
-
- class AllTypes {
- public:
- void run() {
- OID oid;
- oid.init();
-
- BSONObjBuilder b;
- b.appendMinKey( "a" );
- b.append( "b" , 5.5 );
- b.append( "c" , "abc" );
- b.append( "e" , BSON( "x" << 1 ) );
- b.append( "f" , BSON_ARRAY( 1 << 2 << 3 ) );
- b.appendBinData( "g" , sizeof(AllTypes) , bdtCustom , (const void*)this );
- b.appendUndefined( "h" );
- b.append( "i" , oid );
- b.appendBool( "j" , 1 );
- b.appendDate( "k" , 123 );
- b.appendNull( "l" );
- b.appendRegex( "m" , "a" );
- b.appendDBRef( "n" , "foo" , oid );
- b.appendCode( "o" , "function(){}" );
- b.appendSymbol( "p" , "foo" );
- b.appendCodeWScope( "q" , "function(){}" , BSON("x" << 1 ) );
- b.append( "r" , (int)5 );
- b.appendTimestamp( "s" , 123123123123123LL );
- b.append( "t" , 12321312312LL );
- b.appendMaxKey( "u" );
-
- BSONObj o = b.obj();
- o.jsonString();
- //cout << o.jsonString() << endl;
- }
- };
-
- } // namespace JsonStringTests
-
- namespace FromJsonTests {
-
- class Base {
- public:
- virtual ~Base() {}
- void run() {
- ASSERT( fromjson( json() ).valid() );
- assertEquals( bson(), fromjson( tojson( bson() ) ), "mode: <default>" );
- assertEquals( bson(), fromjson( tojson( bson(), Strict ) ), "mode: strict" );
- assertEquals( bson(), fromjson( tojson( bson(), TenGen ) ), "mode: tengen" );
- assertEquals( bson(), fromjson( tojson( bson(), JS ) ), "mode: js" );
- }
- protected:
- virtual BSONObj bson() const = 0;
- virtual string json() const = 0;
- private:
- void assertEquals( const BSONObj &expected,
- const BSONObj &actual,
- const char* msg) {
- const bool bad = expected.woCompare( actual );
- if ( bad ) {
- ::mongo::log() << "want:" << expected.jsonString()
- << " size: " << expected.objsize() << endl;
- ::mongo::log() << "got :" << actual.jsonString()
- << " size: " << actual.objsize() << endl;
- ::mongo::log() << expected.hexDump() << endl;
- ::mongo::log() << actual.hexDump() << endl;
- ::mongo::log() << msg << endl;
- ::mongo::log() << "orig json:" << this->json();
- }
- ASSERT( !bad );
- }
- };
-
- class Bad {
- public:
- virtual ~Bad() {}
- void run() {
- ASSERT_THROWS( fromjson( json() ), MsgAssertionException );
- }
- protected:
- virtual string json() const = 0;
- };
-
- class Empty : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- return b.obj();
- }
- virtual string json() const {
- return "{}";
- }
- };
-
- class EmptyWithSpace : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- return b.obj();
- }
- virtual string json() const {
- return "{ }";
- }
- };
-
- class SingleString : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "b" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"b\" }";
- }
- };
-
- class EmptyStrings : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "", "" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"\" : \"\" }";
- }
- };
-
- class ReservedFieldName : public Bad {
- virtual string json() const {
- return "{ \"$oid\" : \"b\" }";
- }
- };
-
- class ReservedFieldName1 : public Bad {
- virtual string json() const {
- return "{ \"$ref\" : \"b\" }";
- }
- };
-
- class NumberFieldName : public Bad {
- virtual string json() const {
- return "{ 0 : \"b\" }";
- }
- };
-
- class InvalidFieldName : public Bad {
- virtual string json() const {
- return "{ test.test : \"b\" }";
- }
- };
-
- class QuotedNullName : public Bad {
- virtual string json() const {
- return "{ \"nc\0nc\" : \"b\" }";
- }
- };
-
- class NoValue : public Bad {
- virtual string json() const {
- return "{ a : }";
- }
- };
-
- class InvalidValue : public Bad {
- virtual string json() const {
- return "{ a : a }";
- }
- };
-
- class OkDollarFieldName : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "$where", 1 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"$where\" : 1 }";
- }
- };
-
- class SingleNumber : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : 1 }";
- }
- };
-
- class RealNumber : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", strtod( "0.7", 0 ) );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : 0.7 }";
- }
- };
-
- class FancyNumber : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", strtod( "-4.4433e-2", 0 ) );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : -4.4433e-2 }";
- }
- };
-
- class TwoElements : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- b.append( "b", "foo" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : 1, \"b\" : \"foo\" }";
- }
- };
-
- class Subobject : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- BSONObjBuilder c;
- c.append( "z", b.done() );
- return c.obj();
- }
- virtual string json() const {
- return "{ \"z\" : { \"a\" : 1 } }";
- }
- };
-
- class DeeplyNestedObject : public Base {
- virtual string buildJson(int depth) const {
- if (depth == 0) {
- return "{\"0\":true}";
- }
- else {
- std::stringstream ss;
- ss << "{\"" << depth << "\":" << buildJson(depth - 1) << "}";
- depth--;
- return ss.str();
- }
- }
- virtual BSONObj buildBson(int depth) const {
- BSONObjBuilder builder;
- if (depth == 0) {
- builder.append( "0", true );
- return builder.obj();
- }
- else {
- std::stringstream ss;
- ss << depth;
- depth--;
- builder.append(ss.str(), buildBson(depth));
- return builder.obj();
- }
- }
- virtual BSONObj bson() const {
- return buildBson(35);
- }
- virtual string json() const {
- return buildJson(35);
- }
- };
-
- class ArrayEmpty : public Base {
- virtual BSONObj bson() const {
- vector< int > arr;
- BSONObjBuilder b;
- b.append( "a", arr );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : [] }";
- }
- };
-
- class TopLevelArrayEmpty : public Base {
- virtual BSONObj bson() const {
- return BSONArray();
- }
- virtual string json() const {
- return "[]";
- }
- };
-
- class TopLevelArray : public Base {
- virtual BSONObj bson() const {
- BSONArrayBuilder builder;
- builder.append(123);
- builder.append("abc");
- return builder.arr();
- }
- virtual string json() const {
- return "[ 123, \"abc\" ]";
- }
- };
-
- class Array : public Base {
- virtual BSONObj bson() const {
- vector< int > arr;
- arr.push_back( 1 );
- arr.push_back( 2 );
- arr.push_back( 3 );
- BSONObjBuilder b;
- b.append( "a", arr );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : [ 1, 2, 3 ] }";
- }
- };
-
- class True : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendBool( "a", true );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : true }";
- }
- };
-
- class False : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendBool( "a", false );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : false }";
- }
- };
-
- class Null : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNull( "a" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : null }";
- }
- };
-
- class Undefined : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendUndefined( "a" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : undefined }";
- }
- };
-
- class UndefinedStrict : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendUndefined( "a" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$undefined\" : true } }";
- }
- };
-
- class UndefinedStrictBad : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$undefined\" : false } }";
- }
- };
-
- class EscapedCharacters : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "\" \\ / \b \f \n \r \t \v" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t \\v\" }";
- }
- };
-
- class NonEscapedCharacters : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "% { a z $ # ' " );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\% \\{ \\a \\z \\$ \\# \\' \\ \" }";
- }
- };
-
- class AllowedControlCharacter : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "\x7f" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\x7f\" }";
- }
- };
-
- class InvalidControlCharacter : public Bad {
- virtual string json() const {
- return "{ \"a\" : \"\x1f\" }";
- }
- };
-
- class NumbersInFieldName : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "b1", "b" );
- return b.obj();
- }
- virtual string json() const {
- return "{ b1 : \"b\" }";
- }
- };
-
- class EscapeFieldName : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "\n", "b" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"\\n\" : \"b\" }";
- }
- };
-
- class EscapedUnicodeToUtf8 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- unsigned char u[ 7 ];
- u[ 0 ] = 0xe0 | 0x0a;
- u[ 1 ] = 0x80;
- u[ 2 ] = 0x80;
- u[ 3 ] = 0xe0 | 0x0a;
- u[ 4 ] = 0x80;
- u[ 5 ] = 0x80;
- u[ 6 ] = 0;
- b.append( "a", (char *) u );
- BSONObj built = b.obj();
- ASSERT_EQUALS( string( (char *) u ), built.firstElement().valuestr() );
- return built;
- }
- virtual string json() const {
- return "{ \"a\" : \"\\ua000\\uA000\" }";
- }
- };
-
- class Utf8AllOnes : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- unsigned char u[ 8 ];
- u[ 0 ] = 0x01;
-
- u[ 1 ] = 0x7f;
-
- u[ 2 ] = 0xdf;
- u[ 3 ] = 0xbf;
-
- u[ 4 ] = 0xef;
- u[ 5 ] = 0xbf;
- u[ 6 ] = 0xbf;
-
- u[ 7 ] = 0;
-
- b.append( "a", (char *) u );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\u0001\\u007f\\u07ff\\uffff\" }";
- }
- };
-
- class Utf8FirstByteOnes : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- unsigned char u[ 6 ];
- u[ 0 ] = 0xdc;
- u[ 1 ] = 0x80;
-
- u[ 2 ] = 0xef;
- u[ 3 ] = 0xbc;
- u[ 4 ] = 0x80;
-
- u[ 5 ] = 0;
-
- b.append( "a", (char *) u );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\u0700\\uff00\" }";
- }
- };
-
- class Utf8Invalid : public Bad {
- virtual string json() const {
- return "{ \"a\" : \"\\u0ZZZ\" }";
- }
- };
-
- class Utf8TooShort : public Bad {
- virtual string json() const {
- return "{ \"a\" : \"\\u000\" }";
- }
- };
-
- class DBRefConstructor : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", "000000000000000000000000");
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : Dbref( \"ns\", \"000000000000000000000000\" ) }";
- }
- };
-
- // Added for consistency with the mongo shell
- class DBRefConstructorCapitals : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", "000000000000000000000000");
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : DBRef( \"ns\", \"000000000000000000000000\" ) }";
- }
- };
-
- class DBRefConstructorDbName : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", "000000000000000000000000");
- subBuilder.append("$db", "dbname");
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : Dbref( \"ns\", \"000000000000000000000000\", \"dbname\" ) }";
- }
- };
-
- class DBRefConstructorNumber : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", 1);
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : Dbref( \"ns\", 1 ) }";
- }
- };
-
- class DBRefConstructorObject : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- BSONObjBuilder idSubBuilder(subBuilder.subobjStart("$id"));
- idSubBuilder.append("b", true);
- idSubBuilder.done();
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : Dbref( \"ns\", { \"b\" : true } ) }";
- }
- };
-
- class DBRefNumberId : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", 1);
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : 1 } }";
- }
- };
-
- class DBRefObjectAsId : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- BSONObjBuilder idSubBuilder(subBuilder.subobjStart("$id"));
- idSubBuilder.append("b", true);
- idSubBuilder.done();
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : { \"b\" : true } } }";
- }
- };
-
- class DBRefStringId : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", "000000000000000000000000");
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : \"000000000000000000000000\" } }";
- }
- };
-
- class DBRefObjectIDObject : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- OID o;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", o);
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : { \"$oid\" : \"000000000000000000000000\" } } }";
- }
- };
-
- class DBRefObjectIDConstructor : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- OID o;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", o);
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : ObjectId( \"000000000000000000000000\" ) } }";
- }
- };
-
- class DBRefDbName : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", "000000000000000000000000");
- subBuilder.append("$db", "dbname");
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : \"000000000000000000000000\""
- ", \"$db\" : \"dbname\" } }";
- }
- };
-
- class Oid : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendOID( "_id" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"_id\" : { \"$oid\" : \"000000000000000000000000\" } }";
- }
- };
-
- class Oid2 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- char OIDbytes[OID::kOIDSize];
- memset( &OIDbytes, 0x0f, OID::kOIDSize );
- OID o = OID::from(OIDbytes);
- b.appendOID( "_id", &o );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0f0f0f0f0f0f0f\" ) }";
- }
- };
-
- class OidTooLong : public Bad {
- virtual string json() const {
- return "{ \"_id\" : { \"$oid\" : \"0000000000000000000000000\" } }";
- }
- };
-
- class Oid2TooLong : public Bad {
- virtual string json() const {
- return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0f0f0f0f0f0f0f0\" ) }";
- }
- };
-
- class OidTooShort : public Bad {
- virtual string json() const {
- return "{ \"_id\" : { \"$oid\" : \"00000000000000000000000\" } }";
- }
- };
-
- class Oid2TooShort : public Bad {
- virtual string json() const {
- return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0f0f0f0f0f0f0\" ) }";
- }
- };
-
- class OidInvalidChar : public Bad {
- virtual string json() const {
- return "{ \"_id\" : { \"$oid\" : \"00000000000Z000000000000\" } }";
- }
- };
-
- class Oid2InvalidChar : public Bad {
- virtual string json() const {
- return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0fZf0f0f0f0f0f\" ) }";
- }
- };
-
- class StringId : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append("_id", "000000000000000000000000");
- return b.obj();
- }
- virtual string json() const {
- return "{ \"_id\" : \"000000000000000000000000\" }";
- }
- };
-
- class BinData : public Base {
- virtual BSONObj bson() const {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, BinDataGeneral, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinData1 : public Base {
- virtual BSONObj bson() const {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, Function, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"01\" } }";
- }
- };
-
- class BinData2 : public Base {
- virtual BSONObj bson() const {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, ByteArrayDeprecated, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"02\" } }";
- }
- };
-
- class BinData3 : public Base {
- virtual BSONObj bson() const {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, bdtUUID, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"03\" } }";
- }
- };
-
- class BinData4 : public Base {
- virtual BSONObj bson() const {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, newUUID, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"04\" } }";
- }
- };
-
- class BinData5 : public Base {
- virtual BSONObj bson() const {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, MD5Type, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"05\" } }";
- }
- };
-
- class BinData80 : public Base {
- virtual BSONObj bson() const {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, bdtCustom, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"80\" } }";
- }
- };
-
- class BinDataPaddedSingle : public Base {
- virtual BSONObj bson() const {
- char z[ 2 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- BSONObjBuilder b;
- b.appendBinData( "a", 2, BinDataGeneral, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataPaddedDouble : public Base {
- virtual BSONObj bson() const {
- char z[ 1 ];
- z[ 0 ] = 'a';
- BSONObjBuilder b;
- b.appendBinData( "a", 1, BinDataGeneral, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataAllChars : public Base {
- virtual BSONObj bson() const {
- unsigned char z[] = {
- 0x00, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
- 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
- 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
- 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
- 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF
- };
- BSONObjBuilder b;
- b.appendBinData( "a", 48, BinDataGeneral, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataBadLength : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YQ=\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataBadLength1 : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YQ\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataBadLength2 : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YQX==\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataBadLength3 : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YQX\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataBadLength4 : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YQXZ=\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataBadLength5 : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YQXZ==\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataBadChars : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"a...\", \"$type\" : \"00\" } }";
- }
- };
-
- class BinDataTypeTooShort : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"0\" } }";
- }
- };
-
- class BinDataTypeTooLong : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"000\" } }";
- }
- };
-
- class BinDataTypeBadChars : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"ZZ\" } }";
- }
- };
-
- class BinDataEmptyType : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"\" } }";
- }
- };
-
- class BinDataNoType : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"AAAA\" } }";
- }
- };
-
- class BinDataInvalidType : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"100\" } }";
- }
- };
-
- class Date : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", 0 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$date\" : 0 } }";
- }
- };
-
- class DateNegZero : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", -0 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$date\" : -0 } }";
- }
- };
-
- class DateNonzero : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", 1000000000 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$date\" : 1000000000 } }";
- }
- };
-
- class DateStrictTooLong : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : { \"$date\" : " << ~(0ULL) << "1" << " } }";
- return ss.str();
- }
- };
-
- class DateTooLong : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : Date( " << ~(0ULL) << "1" << " ) }";
- return ss.str();
- }
- };
-
- class DateIsString : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : { \"$date\" : \"100\" } }";
- return ss.str();
- }
- };
-
- class DateIsString1 : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : Date(\"a\") }";
- return ss.str();
- }
- };
-
- class DateIsString2 : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : new Date(\"a\") }";
- return ss.str();
- }
- };
-
- class DateIsFloat : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : { \"$date\" : 1.1 } }";
- return ss.str();
- }
- };
-
- class DateIsFloat1 : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : Date(1.1) }";
- return ss.str();
- }
- };
-
- class DateIsFloat2 : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : new Date(1.1) }";
- return ss.str();
- }
- };
-
- class DateIsExponent : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : { \"$date\" : 10e3 } }";
- return ss.str();
- }
- };
-
- class DateIsExponent1 : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : Date(10e3) }";
- return ss.str();
+ errno_t ret = _putenv_s("TZ", "");
+ if (0 != ret) {
+ StringBuilder sb;
+ sb << "Error unsetting TZ environment variable. Error code: " << ret;
+ FAIL(sb.str());
}
- };
-
- class DateIsExponent2 : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : new Date(10e3) }";
- return ss.str();
- }
- };
- /* Need to handle this because jsonString outputs the value of Date_t as unsigned.
- * See SERVER-8330 and SERVER-8573 */
- class DateStrictMaxUnsigned : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", -1 );
- return b.obj();
- }
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : { \"$date\" : "
- << std::numeric_limits<unsigned long long>::max() << " } }";
- return ss.str();
- }
- };
-
- class DateMaxUnsigned : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", -1 );
- return b.obj();
- }
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : Date( "
- << std::numeric_limits<unsigned long long>::max() << " ) }";
- return ss.str();
- }
- };
-
- class DateStrictNegative : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", -1 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$date\" : -1 } }";
- }
- };
-
- class DateNegative : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", -1 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : Date( -1 ) }";
- }
- };
-
- class NumberLongTest : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNumber("a", 20000LL);
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : NumberLong( 20000 ) }";
- }
- };
-
- class NumberLongMin : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNumber("a", std::numeric_limits<long long>::min());
- return b.obj();
- }
- virtual string json() const {
- std::stringstream ss;
- ss << "{'a': NumberLong(";
- ss << std::numeric_limits<long long>::min() ;
- ss << ") }";
- return ss.str();
- }
- };
-
- class NumberIntTest : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNumber("a", 20000);
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : NumberInt( 20000 ) }";
- }
- };
-
- class NumberLongNeg : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNumber("a", -20000LL);
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : NumberLong( -20000 ) }";
- }
- };
-
- class NumberIntNeg : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNumber("a", -20000);
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : NumberInt( -20000 ) }";
- }
- };
-
- class NumberLongBad : public Bad {
- virtual string json() const {
- return "{ \"a\" : NumberLong( 'sdf' ) }";
- }
- };
-
- class NumberIntBad : public Bad {
- virtual string json() const {
- return "{ \"a\" : NumberInt( 'sdf' ) }";
- }
- };
-
- class Timestamp : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendTimestamp( "a", (unsigned long long) 20000, 5 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : Timestamp( 20, 5 ) }";
- }
- };
-
- class TimestampNoIncrement : public Bad {
- virtual string json() const {
- return "{ \"a\" : Timestamp( 20 ) }";
- }
- };
-
- class TimestampZero : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendTimestamp( "a", 0ULL, 0 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : Timestamp( 0, 0 ) }";
- }
- };
-
- class TimestampNoArgs : public Bad {
- virtual string json() const {
- return "{ \"a\" : Timestamp() }";
- }
- };
-
- class TimestampFloatSeconds : public Bad {
- virtual string json() const {
- return "{ \"a\" : Timestamp( 20.0, 1 ) }";
- }
- };
-
- class TimestampFloatIncrement : public Bad {
- virtual string json() const {
- return "{ \"a\" : Timestamp( 20, 1.0 ) }";
- }
- };
-
- class TimestampNegativeSeconds : public Bad {
- virtual string json() const {
- return "{ \"a\" : Timestamp( -20, 5 ) }";
- }
- };
-
- class TimestampNegativeIncrement : public Bad {
- virtual string json() const {
- return "{ \"a\" : Timestamp( 20, -5 ) }";
- }
- };
-
- class TimestampInvalidSeconds : public Bad {
- virtual string json() const {
- return "{ \"a\" : Timestamp( q, 5 ) }";
- }
- };
-
- class TimestampObject : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendTimestamp( "a", (unsigned long long) 20000, 5 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { \"t\" : 20 , \"i\" : 5 } } }";
- }
- };
-
- class TimestampObjectInvalidFieldName : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { \"time\" : 20 , \"increment\" : 5 } } }";
- }
- };
-
- class TimestampObjectNoIncrement : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { \"t\" : 20 } } }";
- }
- };
-
- class TimestampObjectNegativeSeconds : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { \"t\" : -20 , \"i\" : 5 } } }";
- }
- };
-
- class TimestampObjectNegativeIncrement : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { \"t\" : 20 , \"i\" : -5 } } }";
- }
- };
-
- class TimestampObjectInvalidSeconds : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { \"t\" : q , \"i\" : 5 } } }";
- }
- };
-
- class TimestampObjectZero : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendTimestamp( "a", 0ULL, 0 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { \"t\" : 0, \"i\" : 0} } }";
- }
- };
-
- class TimestampObjectNoArgs : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { } } }";
- }
- };
-
- class TimestampObjectFloatSeconds : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { \"t\" : 1.0, \"i\" : 0} } }";
- }
- };
-
- class TimestampObjectFloatIncrement : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$timestamp\" : { \"t\" : 20, \"i\" : 1.0} } }";
- }
- };
-
- class Regex : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "b", "i" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"i\" } }";
- }
- };
-
- class RegexNoOptionField : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "b", "" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"b\" } }";
- }
- };
-
- class RegexEscape : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "\t", "i" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"\\t\", \"$options\" : \"i\" } }";
- }
- };
-
- class RegexWithQuotes : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "\"", "" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : /\"/ }";
- }
- };
-
- class RegexWithQuotes1 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "\"", "" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { $regex : \"\\\"\" }}";
- }
- };
-
- class RegexInvalidField : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"b\", \"field\" : \"i\" } }";
- }
- };
-
- class RegexInvalidOption : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"1\" } }";
- }
- };
-
- class RegexInvalidOption2 : public Bad {
- virtual string json() const {
- return "{ \"a\" : /b/c }";
- }
- };
-
- class RegexInvalidOption3 : public Bad {
- virtual string json() const {
- return "{ \"a\" : /b/ic }";
- }
- };
-
- class RegexInvalidOption4 : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"a\" } }";
- }
- };
-
- class RegexInvalidOption5 : public Bad {
- virtual string json() const {
- return "{ \"a\" : /b/a }";
- }
- };
-
- class RegexEmptyOption : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "b", "" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"\" } }";
- }
- };
-
- class RegexEmpty : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex("a", "", "");
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"\", \"$options\" : \"\"} }";
- }
- };
-
- class RegexEmpty1 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex("a", "", "");
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : // }";
- }
- };
-
- class RegexOverlap : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : // } }";
- }
- };
-
- class Malformed : public Bad {
- string json() const {
- return "{";
- }
- };
-
- class Malformed1 : public Bad {
- string json() const {
- return "}";
- }
- };
-
- class Malformed2 : public Bad {
- string json() const {
- return "{test}";
- }
- };
-
- class Malformed3 : public Bad {
- string json() const {
- return "{test";
- }
- };
-
- class Malformed4 : public Bad {
- string json() const {
- return "{ test : 1";
- }
- };
-
- class Malformed5 : public Bad {
- string json() const {
- return "{ test : 1 , }";
- }
- };
-
- class Malformed6 : public Bad {
- string json() const {
- return "{ test : 1 , tst}";
- }
- };
-
- class Malformed7 : public Bad {
- string json() const {
- return "{ a : []";
- }
- };
-
- class Malformed8 : public Bad {
- string json() const {
- return "{ a : { test : 1 }";
- }
- };
-
- class Malformed9 : public Bad {
- string json() const {
- return "{ a : [ { test : 1]}";
- }
- };
-
- class Malformed10 : public Bad {
- string json() const {
- return "{ a : [ { test : 1], b : 2}";
- }
- };
-
- class Malformed11 : public Bad {
- string json() const {
- return "{ a : \"test\"string }";
- }
- };
-
- class Malformed12 : public Bad {
- string json() const {
- return "{ a : test\"string\" }";
- }
- };
-
- class Malformed13 : public Bad {
- string json() const {
- return "{ a\"bad\" : \"teststring\" }";
- }
- };
-
- class Malformed14 : public Bad {
- string json() const {
- return "{ \"a\"test : \"teststring\" }";
- }
- };
-
- class Malformed15 : public Bad {
- string json() const {
- return "{ \"atest : \"teststring\" }";
- }
- };
-
- class Malformed16 : public Bad {
- string json() const {
- return "{ atest\" : \"teststring\" }";
- }
- };
-
- class Malformed17 : public Bad {
- string json() const {
- return "{ atest\" : 1 }";
- }
- };
-
- class Malformed18 : public Bad {
- string json() const {
- return "{ atest : \"teststring }";
- }
- };
-
- class Malformed19 : public Bad {
- string json() const {
- return "{ atest : teststring\" }";
- }
- };
-
- class UnquotedFieldName : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a_b", 1 );
- return b.obj();
- }
- virtual string json() const {
- return "{ a_b : 1 }";
- }
- };
-
- class UnquotedFieldNameBad : public Bad {
- string json() const {
- return "{ 123 : 1 }";
- }
- };
-
- class UnquotedFieldNameBad1 : public Bad {
- string json() const {
- return "{ -123 : 1 }";
- }
- };
-
- class UnquotedFieldNameBad2 : public Bad {
- string json() const {
- return "{ .123 : 1 }";
- }
- };
-
- class UnquotedFieldNameBad3 : public Bad {
- string json() const {
- return "{ -.123 : 1 }";
- }
- };
-
- class UnquotedFieldNameBad4 : public Bad {
- string json() const {
- return "{ -1.23 : 1 }";
- }
- };
-
- class UnquotedFieldNameBad5 : public Bad {
- string json() const {
- return "{ 1e23 : 1 }";
- }
- };
-
- class UnquotedFieldNameBad6 : public Bad {
- string json() const {
- return "{ -1e23 : 1 }";
- }
- };
-
- class UnquotedFieldNameBad7 : public Bad {
- string json() const {
- return "{ -1e-23 : 1 }";
- }
- };
-
- class UnquotedFieldNameBad8 : public Bad {
- string json() const {
- return "{ -hello : 1 }";
- }
- };
-
- class UnquotedFieldNameBad9 : public Bad {
- string json() const {
- return "{ il.legal : 1 }";
- }
- };
-
- class UnquotedFieldNameBad10 : public Bad {
- string json() const {
- return "{ 10gen : 1 }";
- }
- };
-
- class UnquotedFieldNameBad11 : public Bad {
- string json() const {
- return "{ _123. : 1 }";
- }
- };
-
- class UnquotedFieldNameBad12 : public Bad {
- string json() const {
- return "{ he-llo : 1 }";
- }
- };
-
- class UnquotedFieldNameBad13 : public Bad {
- string json() const {
- return "{ bad\nchar : 1 }";
- }
- };
-
- class UnquotedFieldNameBad14 : public Bad {
- string json() const {
- return "{ thiswill\fail : 1 }";
- }
- };
-
- class UnquotedFieldNameBad15 : public Bad {
- string json() const {
- return "{ failu\re : 1 }";
- }
- };
-
- class UnquotedFieldNameBad16 : public Bad {
- string json() const {
- return "{ t\test : 1 }";
- }
- };
-
- class UnquotedFieldNameBad17 : public Bad {
- string json() const {
- return "{ \break: 1 }";
- }
- };
-
- class UnquotedFieldNameBad18 : public Bad {
- string json() const {
-            // Here we fill the memory directly to test unicode values.
-            // In this case we set \u0700 and \uFF00.
-            // Setting it directly in memory avoids MSVC error C4566.
- unsigned char u[ 6 ];
- u[ 0 ] = 0xdc;
- u[ 1 ] = 0x80;
-
- u[ 2 ] = 0xef;
- u[ 3 ] = 0xbc;
- u[ 4 ] = 0x80;
-
- u[ 5 ] = 0;
- std::stringstream ss;
- ss << "{ " << u << " : 1 }";
- return ss.str();
- }
- };
-
- class UnquotedFieldNameBad19 : public Bad {
- string json() const {
- return "{ bl\\u3333p: 1 }";
- }
- };
-
- class UnquotedFieldNameBad20 : public Bad {
- string json() const {
- return "{ bl-33p: 1 }";
- }
- };
-
- class UnquotedFieldNameDollar : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "$a_b", 1 );
- return b.obj();
- }
- virtual string json() const {
- return "{ $a_b : 1 }";
- }
- };
-
- class SingleQuotes : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "ab'c\"", "bb\b '\"" );
- return b.obj();
- }
- virtual string json() const {
- return "{ 'ab\\'c\"' : 'bb\\b \\'\"' }";
- }
- };
-
- class QuoteTest : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append("\"", "test");
- return b.obj();
- }
- virtual string json() const {
- return "{ '\"' : \"test\" }";
- }
- };
-
- class QuoteTest1 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append("'", "test");
- return b.obj();
- }
- virtual string json() const {
- return "{ \"'\" : \"test\" }";
- }
- };
-
- class QuoteTest2 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append("\"", "test");
- return b.obj();
- }
- virtual string json() const {
- return "{ '\"' : \"test\" }";
- }
- };
-
- class QuoteTest3 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append("\"'\"", "test");
- return b.obj();
- }
- virtual string json() const {
- return "{ '\"\\\'\"' : \"test\" }";
- }
- };
-
- class QuoteTest4 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append("'\"'", "test");
- return b.obj();
- }
- virtual string json() const {
- return "{ \"'\\\"'\" : \"test\" }";
- }
- };
-
- class QuoteTest5 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append("test", "'");
- return b.obj();
- }
- virtual string json() const {
- return "{ \"test\" : \"'\" }";
- }
- };
-
- class QuoteTest6 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append("test", "\"");
- return b.obj();
- }
- virtual string json() const {
- return "{ \"test\" : '\"' }";
- }
- };
-
- class ObjectId : public Base {
- virtual BSONObj bson() const {
- OID id;
- id.init( "deadbeeff00ddeadbeeff00d" );
- BSONObjBuilder b;
- b.appendOID( "_id", &id );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"_id\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
- }
- };
-
- class ObjectId2 : public Base {
- virtual BSONObj bson() const {
- OID id;
- id.init( "deadbeeff00ddeadbeeff00d" );
- BSONObjBuilder b;
- b.appendOID( "foo", &id );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"foo\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
- }
- };
-
- class NumericTypes : public Base {
- public:
- void run() {
- Base::run();
-
- BSONObj o = fromjson(json());
-
- ASSERT(o["int"].type() == NumberInt);
- ASSERT(o["long"].type() == NumberLong);
- ASSERT(o["double"].type() == NumberDouble);
-
- ASSERT(o["long"].numberLong() == 9223372036854775807ll);
- }
-
- virtual BSONObj bson() const {
- return BSON( "int" << 123
- << "long" << 9223372036854775807ll // 2**63 - 1
- << "double" << 3.14
- );
- }
- virtual string json() const {
- return "{ \"int\": 123, \"long\": 9223372036854775807, \"double\": 3.14 }";
- }
- };
-
- class NumericTypesJS : public Base {
- public:
- void run() {
- Base::run();
-
- BSONObj o = fromjson(json());
-
- ASSERT(o["int"].type() == NumberInt);
- ASSERT(o["long"].type() == NumberLong);
- ASSERT(o["double"].type() == NumberDouble);
-
- ASSERT(o["long"].numberLong() == 9223372036854775807ll);
- }
-
- virtual BSONObj bson() const {
- return BSON( "int" << 123
- << "long" << 9223372036854775807ll // 2**63 - 1
- << "double" << 3.14
- );
- }
- virtual string json() const {
- return "{ 'int': NumberInt(123), "
- "'long': NumberLong(9223372036854775807), "
- "'double': 3.14 }";
- }
- };
-
- class NumericLongMin : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNumber("a", std::numeric_limits<long long>::min());
- return b.obj();
- }
- virtual string json() const {
- std::stringstream ss;
- ss << "{'a': ";
- ss << std::numeric_limits<long long>::min() ;
- ss << " }";
- return ss.str();
- }
- };
-
- class NumericIntMin : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNumber("a", std::numeric_limits<int>::min());
- return b.obj();
- }
- virtual string json() const {
- std::stringstream ss;
- ss << "{'a': ";
- ss << std::numeric_limits<int>::min() ;
- ss << " }";
- return ss.str();
- }
- };
-
-
- class NumericLimits : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder builder;
- BSONArrayBuilder numArray(builder.subarrayStart(""));
- numArray.append(std::numeric_limits<long long>::max());
- numArray.append(std::numeric_limits<long long>::min());
- numArray.append(std::numeric_limits<int>::max());
- numArray.append(std::numeric_limits<int>::min());
- numArray.done();
- return builder.obj();
- }
- virtual string json() const {
- std::stringstream ss;
- ss << "{'': [";
- ss << std::numeric_limits<long long>::max() << ",";
- ss << std::numeric_limits<long long>::min() << ",";
- ss << std::numeric_limits<int>::max() << ",";
- ss << std::numeric_limits<int>::min();
- ss << "] }";
- return ss.str();
- }
- };
-
-        // Overflows a double by giving it an exponent that is too large.
- class NumericLimitsBad : public Bad {
- virtual string json() const {
- std::stringstream ss;
- ss << "{ test : ";
- ss << std::numeric_limits<double>::max() << "1111111111";
- ss << "}";
- return ss.str();
- }
- };
-
- class NumericLimitsBad1 : public Bad {
- virtual string json() const {
- std::stringstream ss;
- ss << "{ test : ";
- ss << std::numeric_limits<double>::min() << "11111111111";
- ss << "}";
- return ss.str();
- }
- };
-
- class NegativeNumericTypes : public Base {
- public:
- void run() {
- Base::run();
-
- BSONObj o = fromjson(json());
-
- ASSERT(o["int"].type() == NumberInt);
- ASSERT(o["long"].type() == NumberLong);
- ASSERT(o["double"].type() == NumberDouble);
-
- ASSERT(o["long"].numberLong() == -9223372036854775807ll);
- }
-
- virtual BSONObj bson() const {
- return BSON( "int" << -123
- << "long" << -9223372036854775807ll // -1 * (2**63 - 1)
- << "double" << -3.14
- );
- }
- virtual string json() const {
- return "{ \"int\": -123, \"long\": -9223372036854775807, \"double\": -3.14 }";
- }
- };
-
- class EmbeddedDatesBase : public Base {
- public:
-
- virtual void run() {
- BSONObj o = fromjson( json() );
- ASSERT_EQUALS( 3 , (o["time.valid"].type()) );
- BSONObj e = o["time.valid"].embeddedObjectUserCheck();
- ASSERT_EQUALS( 9 , e["$gt"].type() );
- ASSERT_EQUALS( 9 , e["$lt"].type() );
- Base::run();
- }
-
- BSONObj bson() const {
- BSONObjBuilder e;
- e.appendDate( "$gt" , 1257829200000LL );
- e.appendDate( "$lt" , 1257829200100LL );
-
- BSONObjBuilder b;
- b.append( "time.valid" , e.obj() );
- return b.obj();
- }
- virtual string json() const = 0;
- };
-
- struct EmbeddedDatesFormat1 : EmbeddedDatesBase {
- string json() const {
- return "{ \"time.valid\" : { $gt : { \"$date\" : 1257829200000 } , $lt : { \"$date\" : 1257829200100 } } }";
- }
- };
- struct EmbeddedDatesFormat2 : EmbeddedDatesBase {
- string json() const {
- return "{ \"time.valid\" : { $gt : Date(1257829200000) , $lt : Date( 1257829200100 ) } }";
- }
- };
- struct EmbeddedDatesFormat3 : EmbeddedDatesBase {
- string json() const {
- return "{ \"time.valid\" : { $gt : new Date(1257829200000) , $lt : new Date( 1257829200100 ) } }";
- }
- };
-
- class NullString : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "x" , "a\0b" , 4 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"x\" : \"a\\u0000b\" }";
- }
- };
-
- class NullFieldUnquoted : public Bad {
- virtual string json() const {
- return "{ x\\u0000y : \"a\" }";
- }
- };
-
- class MinKeyAlone : public Bad {
- virtual string json() const {
- return "{ \"$minKey\" : 1 }";
- }
- };
-
- class MaxKeyAlone : public Bad {
- virtual string json() const {
- return "{ \"$maxKey\" : 1 }";
- }
- };
-
- class MinKey : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendMinKey("a");
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$minKey\" : 1 } }";
- }
- };
-
- class MaxKey : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendMaxKey("a");
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$maxKey\" : 1 } }";
+#else
+ if (-1 == unsetenv("TZ")) {
+ FAIL(errnoWithDescription());
}
- };
-
- } // namespace FromJsonTests
-
- class All : public Suite {
- public:
- All() : Suite( "json" ) {
+#endif
}
-
- void setupTests() {
- add< JsonStringTests::Empty >();
- add< JsonStringTests::SingleStringMember >();
- add< JsonStringTests::EscapedCharacters >();
- add< JsonStringTests::AdditionalControlCharacters >();
- add< JsonStringTests::ExtendedAscii >();
- add< JsonStringTests::EscapeFieldName >();
- add< JsonStringTests::SingleIntMember >();
- add< JsonStringTests::SingleNumberMember >();
- add< JsonStringTests::InvalidNumbers >();
- add< JsonStringTests::NumberPrecision >();
- add< JsonStringTests::NegativeNumber >();
- add< JsonStringTests::NumberLongStrict >();
- add< JsonStringTests::NumberLongStrictLarge >();
- add< JsonStringTests::NumberLongStrictNegative >();
- add< JsonStringTests::NumberDoubleNaN >();
- add< JsonStringTests::NumberDoubleInfinity >();
- add< JsonStringTests::NumberDoubleNegativeInfinity >();
- add< JsonStringTests::SingleBoolMember >();
- add< JsonStringTests::SingleNullMember >();
- add< JsonStringTests::SingleUndefinedMember >();
- add< JsonStringTests::SingleObjectMember >();
- add< JsonStringTests::TwoMembers >();
- add< JsonStringTests::EmptyArray >();
- add< JsonStringTests::Array >();
- add< JsonStringTests::DBRef >();
- add< JsonStringTests::DBRefZero >();
- add< JsonStringTests::ObjectId >();
- add< JsonStringTests::BinData >();
- add< JsonStringTests::Symbol >();
- add< JsonStringTests::Date >();
- add< JsonStringTests::DateNegative >();
- add< JsonStringTests::Regex >();
- add< JsonStringTests::RegexEscape >();
- add< JsonStringTests::RegexManyOptions >();
- add< JsonStringTests::CodeTests >();
- add< JsonStringTests::CodeWScopeTests >();
- add< JsonStringTests::TimestampTests >();
- add< JsonStringTests::NullString >();
- add< JsonStringTests::AllTypes >();
-
- add< FromJsonTests::Empty >();
- add< FromJsonTests::EmptyWithSpace >();
- add< FromJsonTests::SingleString >();
- add< FromJsonTests::EmptyStrings >();
- add< FromJsonTests::ReservedFieldName >();
- add< FromJsonTests::ReservedFieldName1 >();
- add< FromJsonTests::NumberFieldName >();
- add< FromJsonTests::InvalidFieldName >();
- add< FromJsonTests::QuotedNullName >();
- add< FromJsonTests::NoValue >();
-            add< FromJsonTests::InvalidValue >();
- add< FromJsonTests::OkDollarFieldName >();
- add< FromJsonTests::SingleNumber >();
- add< FromJsonTests::RealNumber >();
- add< FromJsonTests::FancyNumber >();
- add< FromJsonTests::TwoElements >();
- add< FromJsonTests::Subobject >();
- add< FromJsonTests::DeeplyNestedObject >();
- add< FromJsonTests::ArrayEmpty >();
- add< FromJsonTests::TopLevelArrayEmpty >();
- add< FromJsonTests::TopLevelArray >();
- add< FromJsonTests::Array >();
- add< FromJsonTests::True >();
- add< FromJsonTests::False >();
- add< FromJsonTests::Null >();
- add< FromJsonTests::Undefined >();
- add< FromJsonTests::UndefinedStrict >();
- add< FromJsonTests::UndefinedStrictBad >();
- add< FromJsonTests::EscapedCharacters >();
- add< FromJsonTests::NonEscapedCharacters >();
- add< FromJsonTests::AllowedControlCharacter >();
- add< FromJsonTests::InvalidControlCharacter >();
- add< FromJsonTests::NumbersInFieldName >();
- add< FromJsonTests::EscapeFieldName >();
- add< FromJsonTests::EscapedUnicodeToUtf8 >();
- add< FromJsonTests::Utf8AllOnes >();
- add< FromJsonTests::Utf8FirstByteOnes >();
- add< FromJsonTests::Utf8Invalid >();
- add< FromJsonTests::Utf8TooShort >();
- add< FromJsonTests::DBRefConstructor >();
- add< FromJsonTests::DBRefConstructorCapitals >();
- add< FromJsonTests::DBRefConstructorDbName >();
- add< FromJsonTests::DBRefConstructorNumber >();
- add< FromJsonTests::DBRefConstructorObject >();
- add< FromJsonTests::DBRefNumberId >();
- add< FromJsonTests::DBRefObjectAsId >();
- add< FromJsonTests::DBRefStringId >();
- add< FromJsonTests::DBRefObjectIDObject >();
- add< FromJsonTests::DBRefObjectIDConstructor >();
- add< FromJsonTests::DBRefDbName >();
- add< FromJsonTests::Oid >();
- add< FromJsonTests::Oid2 >();
- add< FromJsonTests::OidTooLong >();
- add< FromJsonTests::Oid2TooLong >();
- add< FromJsonTests::OidTooShort >();
- add< FromJsonTests::Oid2TooShort >();
- add< FromJsonTests::OidInvalidChar >();
- add< FromJsonTests::Oid2InvalidChar >();
- add< FromJsonTests::StringId >();
- add< FromJsonTests::BinData >();
- add< FromJsonTests::BinData1 >();
- add< FromJsonTests::BinData2 >();
- add< FromJsonTests::BinData3 >();
- add< FromJsonTests::BinData4 >();
- add< FromJsonTests::BinData5 >();
- add< FromJsonTests::BinData80 >();
- add< FromJsonTests::BinDataPaddedSingle >();
- add< FromJsonTests::BinDataPaddedDouble >();
- add< FromJsonTests::BinDataAllChars >();
- add< FromJsonTests::BinDataBadLength >();
- add< FromJsonTests::BinDataBadLength1 >();
- add< FromJsonTests::BinDataBadLength2 >();
- add< FromJsonTests::BinDataBadLength3 >();
- add< FromJsonTests::BinDataBadLength4 >();
- add< FromJsonTests::BinDataBadLength5 >();
- add< FromJsonTests::BinDataBadChars >();
- add< FromJsonTests::BinDataTypeTooShort >();
- add< FromJsonTests::BinDataTypeTooLong >();
- add< FromJsonTests::BinDataTypeBadChars >();
- add< FromJsonTests::BinDataEmptyType >();
- add< FromJsonTests::BinDataNoType >();
- add< FromJsonTests::BinDataInvalidType >();
- // DOCS-2539: We cannot parse dates generated with a Unix timestamp of zero in local
- // time, since the body of the date may be before the Unix Epoch. This causes parsing
- // to fail even if the offset would properly adjust the time. For example,
- // "1969-12-31T19:00:00-05:00" actually represents the Unix timestamp of zero, but we
- // cannot parse it because the body of the date is before 1970.
- //add< FromJsonTests::Date >();
- //add< FromJsonTests::DateNegZero >();
- add< FromJsonTests::DateNonzero >();
- add< FromJsonTests::DateStrictTooLong >();
- add< FromJsonTests::DateTooLong >();
- add< FromJsonTests::DateIsString >();
- add< FromJsonTests::DateIsString1 >();
- add< FromJsonTests::DateIsString2 >();
- add< FromJsonTests::DateIsFloat >();
- add< FromJsonTests::DateIsFloat1 >();
- add< FromJsonTests::DateIsFloat2 >();
- add< FromJsonTests::DateIsExponent >();
- add< FromJsonTests::DateIsExponent1 >();
- add< FromJsonTests::DateIsExponent2 >();
- add< FromJsonTests::DateStrictMaxUnsigned >();
- add< FromJsonTests::DateMaxUnsigned >();
- add< FromJsonTests::DateStrictNegative >();
- add< FromJsonTests::DateNegative >();
- add< FromJsonTests::NumberLongTest >();
- add< FromJsonTests::NumberLongMin >();
- add< FromJsonTests::NumberIntTest >();
- add< FromJsonTests::NumberLongNeg >();
- add< FromJsonTests::NumberIntNeg >();
- add< FromJsonTests::NumberLongBad >();
- add< FromJsonTests::NumberIntBad >();
- add< FromJsonTests::Timestamp >();
- add< FromJsonTests::TimestampNoIncrement >();
- add< FromJsonTests::TimestampZero >();
- add< FromJsonTests::TimestampNoArgs >();
- add< FromJsonTests::TimestampFloatSeconds >();
- add< FromJsonTests::TimestampFloatIncrement >();
- add< FromJsonTests::TimestampNegativeSeconds >();
- add< FromJsonTests::TimestampNegativeIncrement >();
- add< FromJsonTests::TimestampInvalidSeconds >();
- add< FromJsonTests::TimestampObject >();
- add< FromJsonTests::TimestampObjectInvalidFieldName >();
- add< FromJsonTests::TimestampObjectNoIncrement >();
- add< FromJsonTests::TimestampObjectNegativeSeconds >();
- add< FromJsonTests::TimestampObjectNegativeIncrement >();
- add< FromJsonTests::TimestampObjectInvalidSeconds >();
- add< FromJsonTests::TimestampObjectZero >();
- add< FromJsonTests::TimestampObjectNoArgs >();
- add< FromJsonTests::TimestampObjectFloatSeconds >();
- add< FromJsonTests::TimestampObjectFloatIncrement >();
- add< FromJsonTests::Regex >();
- add< FromJsonTests::RegexNoOptionField >();
- add< FromJsonTests::RegexEscape >();
- add< FromJsonTests::RegexWithQuotes >();
- add< FromJsonTests::RegexWithQuotes1 >();
- add< FromJsonTests::RegexInvalidField >();
- add< FromJsonTests::RegexInvalidOption >();
- add< FromJsonTests::RegexInvalidOption2 >();
- add< FromJsonTests::RegexInvalidOption3 >();
- add< FromJsonTests::RegexInvalidOption4 >();
- add< FromJsonTests::RegexInvalidOption5 >();
- add< FromJsonTests::RegexEmptyOption >();
- add< FromJsonTests::RegexEmpty >();
- add< FromJsonTests::RegexEmpty1 >();
- add< FromJsonTests::RegexOverlap >();
- add< FromJsonTests::Malformed >();
- add< FromJsonTests::Malformed1 >();
- add< FromJsonTests::Malformed2 >();
- add< FromJsonTests::Malformed3 >();
- add< FromJsonTests::Malformed4 >();
- add< FromJsonTests::Malformed5 >();
- add< FromJsonTests::Malformed6 >();
- add< FromJsonTests::Malformed7 >();
- add< FromJsonTests::Malformed8 >();
- add< FromJsonTests::Malformed9 >();
- add< FromJsonTests::Malformed10 >();
- add< FromJsonTests::Malformed11 >();
- add< FromJsonTests::Malformed12 >();
- add< FromJsonTests::Malformed13 >();
- add< FromJsonTests::Malformed14 >();
- add< FromJsonTests::Malformed15 >();
- add< FromJsonTests::Malformed16 >();
- add< FromJsonTests::Malformed17 >();
- add< FromJsonTests::Malformed18 >();
- add< FromJsonTests::Malformed19 >();
- add< FromJsonTests::UnquotedFieldName >();
- add< FromJsonTests::UnquotedFieldNameBad >();
- add< FromJsonTests::UnquotedFieldNameBad1 >();
- add< FromJsonTests::UnquotedFieldNameBad2 >();
- add< FromJsonTests::UnquotedFieldNameBad3 >();
- add< FromJsonTests::UnquotedFieldNameBad4 >();
- add< FromJsonTests::UnquotedFieldNameBad5 >();
- add< FromJsonTests::UnquotedFieldNameBad6 >();
- add< FromJsonTests::UnquotedFieldNameBad7 >();
- add< FromJsonTests::UnquotedFieldNameBad8 >();
- add< FromJsonTests::UnquotedFieldNameBad9 >();
- add< FromJsonTests::UnquotedFieldNameBad10 >();
- add< FromJsonTests::UnquotedFieldNameBad11 >();
- add< FromJsonTests::UnquotedFieldNameBad12 >();
- add< FromJsonTests::UnquotedFieldNameBad13 >();
- add< FromJsonTests::UnquotedFieldNameBad14 >();
- add< FromJsonTests::UnquotedFieldNameBad15 >();
- add< FromJsonTests::UnquotedFieldNameBad16 >();
- add< FromJsonTests::UnquotedFieldNameBad17 >();
- add< FromJsonTests::UnquotedFieldNameBad18 >();
- add< FromJsonTests::UnquotedFieldNameBad19 >();
- add< FromJsonTests::UnquotedFieldNameBad20 >();
- add< FromJsonTests::UnquotedFieldNameDollar >();
- add< FromJsonTests::SingleQuotes >();
- add< FromJsonTests::QuoteTest >();
- add< FromJsonTests::QuoteTest1 >();
- add< FromJsonTests::QuoteTest2 >();
- add< FromJsonTests::QuoteTest3 >();
- add< FromJsonTests::QuoteTest4 >();
- add< FromJsonTests::QuoteTest5 >();
- add< FromJsonTests::QuoteTest6 >();
- add< FromJsonTests::ObjectId >();
- add< FromJsonTests::ObjectId2 >();
- add< FromJsonTests::NumericIntMin >();
- add< FromJsonTests::NumericLongMin >();
- add< FromJsonTests::NumericTypes >();
- add< FromJsonTests::NumericTypesJS >();
- add< FromJsonTests::NumericLimits >();
- add< FromJsonTests::NumericLimitsBad >();
- add< FromJsonTests::NumericLimitsBad1 >();
- add< FromJsonTests::NegativeNumericTypes >();
- add< FromJsonTests::EmbeddedDatesFormat1 >();
- add< FromJsonTests::EmbeddedDatesFormat2 >();
- add< FromJsonTests::EmbeddedDatesFormat3 >();
- add< FromJsonTests::NullString >();
- add< FromJsonTests::NullFieldUnquoted >();
- add< FromJsonTests::MinKey >();
- add< FromJsonTests::MaxKey >();
+ tzset();
+ }
+
+ void run() {
+ BSONObjBuilder b;
+ b.appendDate("a", 0);
+ BSONObj built = b.done();
+ ASSERT_EQUALS("{ \"a\" : { \"$date\" : \"1969-12-31T19:00:00.000-0500\" } }",
+ built.jsonString(Strict));
+ ASSERT_EQUALS("{ \"a\" : Date( 0 ) }", built.jsonString(TenGen));
+ ASSERT_EQUALS("{ \"a\" : Date( 0 ) }", built.jsonString(JS));
+
+ // Test dates above our maximum formattable date. See SERVER-13760.
+ BSONObjBuilder b2;
+ b2.appendDate("a", 32535262800000ULL);
+ BSONObj built2 = b2.done();
+ ASSERT_EQUALS("{ \"a\" : { \"$date\" : { \"$numberLong\" : \"32535262800000\" } } }",
+ built2.jsonString(Strict));
+ }
+
+private:
+ std::string _oldTimezone;
+};
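+
+// A minimal RAII sketch of the save/set/restore TZ pattern this fixture relies
+// on, assuming the POSIX getenv()/setenv()/unsetenv() interface (the Windows
+// branch uses _putenv_s() instead); TZGuard is a hypothetical illustration,
+// not part of the suite.
+class TZGuard {
+public:
+    explicit TZGuard(const char* tz) : _had(false) {
+        if (const char* old = getenv("TZ")) {  // save any prior value
+            _had = true;
+            _old = old;
+        }
+        setenv("TZ", tz, 1);  // 1 == overwrite an existing value
+        tzset();              // make the C runtime re-read TZ
+    }
+    ~TZGuard() {
+        if (_had) {
+            setenv("TZ", _old.c_str(), 1);  // restore the saved value
+        } else {
+            unsetenv("TZ");  // there was none; remove ours
+        }
+        tzset();
+    }
+
+private:
+    std::string _old;
+    bool _had;
+};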
+
+class DateNegative {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendDate("a", -1);
+ BSONObj built = b.done();
+ ASSERT_EQUALS("{ \"a\" : { \"$date\" : { \"$numberLong\" : \"-1\" } } }",
+ built.jsonString(Strict));
+ ASSERT_EQUALS("{ \"a\" : Date( -1 ) }", built.jsonString(TenGen));
+ ASSERT_EQUALS("{ \"a\" : Date( -1 ) }", built.jsonString(JS));
+ }
+};
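+
+// Dates outside the formattable range, including all negative values, cannot
+// be rendered as ISO-8601 strings, so Strict mode falls back to the
+// { "$date" : { "$numberLong" : "..." } } form asserted above; see the
+// SERVER-13760 note in the previous test.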
+
+class Regex {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex("a", "abc", "i");
+ BSONObj built = b.done();
+ ASSERT_EQUALS("{ \"a\" : { \"$regex\" : \"abc\", \"$options\" : \"i\" } }",
+ built.jsonString(Strict));
+ ASSERT_EQUALS("{ \"a\" : /abc/i }", built.jsonString(TenGen));
+ ASSERT_EQUALS("{ \"a\" : /abc/i }", built.jsonString(JS));
+ }
+};
+
+class RegexEscape {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex("a", "/\"", "i");
+ BSONObj built = b.done();
+ ASSERT_EQUALS("{ \"a\" : { \"$regex\" : \"/\\\"\", \"$options\" : \"i\" } }",
+ built.jsonString(Strict));
+ ASSERT_EQUALS("{ \"a\" : /\\/\\\"/i }", built.jsonString(TenGen));
+ ASSERT_EQUALS("{ \"a\" : /\\/\\\"/i }", built.jsonString(JS));
+ }
+};
+
+class RegexManyOptions {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex("a", "z", "abcgimx");
+ BSONObj built = b.done();
+ ASSERT_EQUALS("{ \"a\" : { \"$regex\" : \"z\", \"$options\" : \"abcgimx\" } }",
+ built.jsonString(Strict));
+ ASSERT_EQUALS("{ \"a\" : /z/gim }", built.jsonString(TenGen));
+ ASSERT_EQUALS("{ \"a\" : /z/gim }", built.jsonString(JS));
+ }
+};
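+
+// Only the flags that JavaScript regex literals accept survive in the TenGen
+// and JS renderings ("abcgimx" collapses to /z/gim above); Strict mode
+// preserves the complete option string in "$options".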
+
+class CodeTests {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendCode("x", "function(arg){ var string = \"\\n\"; return 1; }");
+ BSONObj o = b.obj();
+ ASSERT_EQUALS(
+ "{ \"x\" : \"function(arg){ var string = \\\"\\\\n\\\"; "
+ "return 1; }\" }",
+ o.jsonString());
+ }
+};
+
+class CodeWScopeTests {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendCodeWScope("x", "function(arg){ var string = \"\\n\"; return x; }", BSON("x" << 1));
+ BSONObj o = b.obj();
+ ASSERT_EQUALS(
+ "{ \"x\" : "
+ "{ \"$code\" : "
+ "\"function(arg){ var string = \\\"\\\\n\\\"; return x; }\" , "
+ "\"$scope\" : { \"x\" : 1 } } }",
+ o.jsonString());
+ }
+};
+
+class TimestampTests {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendTimestamp("x", 4000, 10);
+ BSONObj o = b.obj();
+ ASSERT_EQUALS("{ \"x\" : { \"$timestamp\" : { \"t\" : 4, \"i\" : 10 } } }",
+ o.jsonString(Strict));
+ ASSERT_EQUALS("{ \"x\" : { \"$timestamp\" : { \"t\" : 4, \"i\" : 10 } } }",
+ o.jsonString(JS));
+ ASSERT_EQUALS("{ \"x\" : Timestamp( 4, 10 ) }", o.jsonString(TenGen));
+ }
+};
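+
+// appendTimestamp() takes its time argument in milliseconds, which is why the
+// 4000 above serializes as "t" : 4 (seconds) in all three output modes.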
+
+class NullString {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.append("x", "a\0b", 4);
+ BSONObj o = b.obj();
+ ASSERT_EQUALS("{ \"x\" : \"a\\u0000b\" }", o.jsonString());
+ }
+};
+
+class AllTypes {
+public:
+ void run() {
+ OID oid;
+ oid.init();
+
+ BSONObjBuilder b;
+ b.appendMinKey("a");
+ b.append("b", 5.5);
+ b.append("c", "abc");
+ b.append("e", BSON("x" << 1));
+ b.append("f", BSON_ARRAY(1 << 2 << 3));
+ b.appendBinData("g", sizeof(AllTypes), bdtCustom, (const void*)this);
+ b.appendUndefined("h");
+ b.append("i", oid);
+ b.appendBool("j", 1);
+ b.appendDate("k", 123);
+ b.appendNull("l");
+ b.appendRegex("m", "a");
+ b.appendDBRef("n", "foo", oid);
+ b.appendCode("o", "function(){}");
+ b.appendSymbol("p", "foo");
+ b.appendCodeWScope("q", "function(){}", BSON("x" << 1));
+ b.append("r", (int)5);
+ b.appendTimestamp("s", 123123123123123LL);
+ b.append("t", 12321312312LL);
+ b.appendMaxKey("u");
+
+ BSONObj o = b.obj();
+ o.jsonString();
+ // cout << o.jsonString() << endl;
+ }
+};
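+
+// AllTypes is a smoke test: it only verifies that jsonString() can serialize
+// every BSON type, including the deprecated ones, without crashing; the output
+// text itself is not asserted.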
+
+} // namespace JsonStringTests
+
+namespace FromJsonTests {
+
+class Base {
+public:
+ virtual ~Base() {}
+ void run() {
+ ASSERT(fromjson(json()).valid());
+ assertEquals(bson(), fromjson(tojson(bson())), "mode: <default>");
+ assertEquals(bson(), fromjson(tojson(bson(), Strict)), "mode: strict");
+ assertEquals(bson(), fromjson(tojson(bson(), TenGen)), "mode: tengen");
+ assertEquals(bson(), fromjson(tojson(bson(), JS)), "mode: js");
+ }
+
+protected:
+ virtual BSONObj bson() const = 0;
+ virtual string json() const = 0;
+
+private:
+ void assertEquals(const BSONObj& expected, const BSONObj& actual, const char* msg) {
+ const bool bad = expected.woCompare(actual);
+ if (bad) {
+ ::mongo::log() << "want:" << expected.jsonString() << " size: " << expected.objsize()
+ << endl;
+ ::mongo::log() << "got :" << actual.jsonString() << " size: " << actual.objsize()
+ << endl;
+ ::mongo::log() << expected.hexDump() << endl;
+ ::mongo::log() << actual.hexDump() << endl;
+ ::mongo::log() << msg << endl;
+ ::mongo::log() << "orig json:" << this->json();
}
- };
-
- SuiteInstance<All> myall;
-
-} // namespace JsonTests
-
+ ASSERT(!bad);
+ }
+};
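+
+// Base drives a full round trip for each fixture: bson() is serialized with
+// tojson() in every output mode (default, Strict, TenGen, JS) and parsed back
+// with fromjson(), and each result must compare equal to the original object
+// (woCompare() == 0). On a mismatch, assertEquals() logs both the JSON and the
+// hex dumps of the two objects to ease debugging.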
+
+class Bad {
+public:
+ virtual ~Bad() {}
+ void run() {
+ ASSERT_THROWS(fromjson(json()), MsgAssertionException);
+ }
+
+protected:
+ virtual string json() const = 0;
+};
+
+class Empty : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{}";
+ }
+};
+
+class EmptyWithSpace : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ }";
+ }
+};
+
+class SingleString : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", "b");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"b\" }";
+ }
+};
+
+class EmptyStrings : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"\" : \"\" }";
+ }
+};
+
+class ReservedFieldName : public Bad {
+ virtual string json() const {
+ return "{ \"$oid\" : \"b\" }";
+ }
+};
+
+class ReservedFieldName1 : public Bad {
+ virtual string json() const {
+ return "{ \"$ref\" : \"b\" }";
+ }
+};
+
+class NumberFieldName : public Bad {
+ virtual string json() const {
+ return "{ 0 : \"b\" }";
+ }
+};
+
+class InvalidFieldName : public Bad {
+ virtual string json() const {
+ return "{ test.test : \"b\" }";
+ }
+};
+
+class QuotedNullName : public Bad {
+ virtual string json() const {
+ return "{ \"nc\0nc\" : \"b\" }";
+ }
+};
+
+class NoValue : public Bad {
+ virtual string json() const {
+ return "{ a : }";
+ }
+};
+
+class InvalidValue : public Bad {
+ virtual string json() const {
+ return "{ a : a }";
+ }
+};
+
+class OkDollarFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("$where", 1);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"$where\" : 1 }";
+ }
+};
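+
+// A leading '$' is not itself illegal in a field name: reserved extended-JSON
+// keys such as "$oid" and "$ref" are rejected (see ReservedFieldName above),
+// but other $-prefixed names like "$where" parse normally.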
+
+class SingleNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1 }";
+ }
+};
+
+class RealNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", strtod("0.7", 0));
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 0.7 }";
+ }
+};
+
+class FancyNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", strtod("-4.4433e-2", 0));
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : -4.4433e-2 }";
+ }
+};
+
+class TwoElements : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ b.append("b", "foo");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1, \"b\" : \"foo\" }";
+ }
+};
+
+class Subobject : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ BSONObjBuilder c;
+ c.append("z", b.done());
+ return c.obj();
+ }
+ virtual string json() const {
+ return "{ \"z\" : { \"a\" : 1 } }";
+ }
+};
+
+class DeeplyNestedObject : public Base {
+ virtual string buildJson(int depth) const {
+ if (depth == 0) {
+ return "{\"0\":true}";
+ } else {
+ std::stringstream ss;
+ ss << "{\"" << depth << "\":" << buildJson(depth - 1) << "}";
+ return ss.str();
+ }
+ }
+ virtual BSONObj buildBson(int depth) const {
+ BSONObjBuilder builder;
+ if (depth == 0) {
+ builder.append("0", true);
+ return builder.obj();
+ } else {
+ std::stringstream ss;
+ ss << depth;
+ depth--;
+ builder.append(ss.str(), buildBson(depth));
+ return builder.obj();
+ }
+ }
+ virtual BSONObj bson() const {
+ return buildBson(35);
+ }
+ virtual string json() const {
+ return buildJson(35);
+ }
+};
+
+class ArrayEmpty : public Base {
+ virtual BSONObj bson() const {
+ vector<int> arr;
+ BSONObjBuilder b;
+ b.append("a", arr);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [] }";
+ }
+};
+
+class TopLevelArrayEmpty : public Base {
+ virtual BSONObj bson() const {
+ return BSONArray();
+ }
+ virtual string json() const {
+ return "[]";
+ }
+};
+
+class TopLevelArray : public Base {
+ virtual BSONObj bson() const {
+ BSONArrayBuilder builder;
+ builder.append(123);
+ builder.append("abc");
+ return builder.arr();
+ }
+ virtual string json() const {
+ return "[ 123, \"abc\" ]";
+ }
+};
+
+class Array : public Base {
+ virtual BSONObj bson() const {
+ vector<int> arr;
+ arr.push_back(1);
+ arr.push_back(2);
+ arr.push_back(3);
+ BSONObjBuilder b;
+ b.append("a", arr);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [ 1, 2, 3 ] }";
+ }
+};
+
+class True : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool("a", true);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : true }";
+ }
+};
+
+class False : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool("a", false);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : false }";
+ }
+};
+
+class Null : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNull("a");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : null }";
+ }
+};
+
+class Undefined : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendUndefined("a");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : undefined }";
+ }
+};
+
+class UndefinedStrict : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendUndefined("a");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$undefined\" : true } }";
+ }
+};
+
+class UndefinedStrictBad : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$undefined\" : false } }";
+ }
+};
+
+class EscapedCharacters : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", "\" \\ / \b \f \n \r \t \v");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t \\v\" }";
+ }
+};
+
+class NonEscapedCharacters : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", "% { a z $ # ' ");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\% \\{ \\a \\z \\$ \\# \\' \\ \" }";
+ }
+};
+
+class AllowedControlCharacter : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", "\x7f");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\x7f\" }";
+ }
+};
+
+class InvalidControlCharacter : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : \"\x1f\" }";
+ }
+};
+
+class NumbersInFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("b1", "b");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ b1 : \"b\" }";
+ }
+};
+
+class EscapeFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("\n", "b");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"\\n\" : \"b\" }";
+ }
+};
+
+class EscapedUnicodeToUtf8 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[7];
+ u[0] = 0xe0 | 0x0a;
+ u[1] = 0x80;
+ u[2] = 0x80;
+ u[3] = 0xe0 | 0x0a;
+ u[4] = 0x80;
+ u[5] = 0x80;
+ u[6] = 0;
+ b.append("a", (char*)u);
+ BSONObj built = b.obj();
+ ASSERT_EQUALS(string((char*)u), built.firstElement().valuestr());
+ return built;
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\ua000\\uA000\" }";
+ }
+};
+
+class Utf8AllOnes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[8];
+ u[0] = 0x01;
+
+ u[1] = 0x7f;
+
+ u[2] = 0xdf;
+ u[3] = 0xbf;
+
+ u[4] = 0xef;
+ u[5] = 0xbf;
+ u[6] = 0xbf;
+
+ u[7] = 0;
+
+ b.append("a", (char*)u);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0001\\u007f\\u07ff\\uffff\" }";
+ }
+};
+
+class Utf8FirstByteOnes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[6];
+ u[0] = 0xdc;
+ u[1] = 0x80;
+
+ u[2] = 0xef;
+ u[3] = 0xbc;
+ u[4] = 0x80;
+
+ u[5] = 0;
+
+ b.append("a", (char*)u);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0700\\uff00\" }";
+ }
+};
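+
+// The fixtures above assemble their expected strings byte by byte: in
+// Utf8FirstByteOnes, 0xdc 0x80 is the two-byte UTF-8 encoding of U+0700 and
+// 0xef 0xbc 0x80 is the three-byte encoding of U+FF00. Building the bytes
+// directly sidesteps MSVC warning C4566 for non-ASCII characters in literals.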
+
+class Utf8Invalid : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0ZZZ\" }";
+ }
+};
+
+class Utf8TooShort : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : \"\\u000\" }";
+ }
+};
+
+class DBRefConstructor : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", "000000000000000000000000");
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : Dbref( \"ns\", \"000000000000000000000000\" ) }";
+ }
+};
+
+// Added for consistency with the mongo shell
+class DBRefConstructorCapitals : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", "000000000000000000000000");
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : DBRef( \"ns\", \"000000000000000000000000\" ) }";
+ }
+};
+
+class DBRefConstructorDbName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", "000000000000000000000000");
+ subBuilder.append("$db", "dbname");
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : Dbref( \"ns\", \"000000000000000000000000\", \"dbname\" ) }";
+ }
+};
+
+class DBRefConstructorNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", 1);
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : Dbref( \"ns\", 1 ) }";
+ }
+};
+
+class DBRefConstructorObject : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ BSONObjBuilder idSubBuilder(subBuilder.subobjStart("$id"));
+ idSubBuilder.append("b", true);
+ idSubBuilder.done();
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : Dbref( \"ns\", { \"b\" : true } ) }";
+ }
+};
+
+class DBRefNumberId : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", 1);
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : 1 } }";
+ }
+};
+
+class DBRefObjectAsId : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ BSONObjBuilder idSubBuilder(subBuilder.subobjStart("$id"));
+ idSubBuilder.append("b", true);
+ idSubBuilder.done();
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : { \"b\" : true } } }";
+ }
+};
+
+class DBRefStringId : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", "000000000000000000000000");
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : \"000000000000000000000000\" } }";
+ }
+};
+
+class DBRefObjectIDObject : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", o);
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : { \"$oid\" : \"000000000000000000000000\" "
+ "} } }";
+ }
+};
+
+class DBRefObjectIDConstructor : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", o);
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : ObjectId( \"000000000000000000000000\" ) "
+ "} }";
+ }
+};
+
+class DBRefDbName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", "000000000000000000000000");
+ subBuilder.append("$db", "dbname");
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$ref\" : \"ns\", \"$id\" : \"000000000000000000000000\""
+ ", \"$db\" : \"dbname\" } }";
+ }
+};
+
+class Oid : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendOID("_id");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"_id\" : { \"$oid\" : \"000000000000000000000000\" } }";
+ }
+};
+
+class Oid2 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ char OIDbytes[OID::kOIDSize];
+ memset(&OIDbytes, 0x0f, OID::kOIDSize);
+ OID o = OID::from(OIDbytes);
+ b.appendOID("_id", &o);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0f0f0f0f0f0f0f\" ) }";
+ }
+};
+
+class OidTooLong : public Bad {
+ virtual string json() const {
+ return "{ \"_id\" : { \"$oid\" : \"0000000000000000000000000\" } }";
+ }
+};
+
+class Oid2TooLong : public Bad {
+ virtual string json() const {
+ return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0f0f0f0f0f0f0f0\" ) }";
+ }
+};
+
+class OidTooShort : public Bad {
+ virtual string json() const {
+ return "{ \"_id\" : { \"$oid\" : \"00000000000000000000000\" } }";
+ }
+};
+
+class Oid2TooShort : public Bad {
+ virtual string json() const {
+ return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0f0f0f0f0f0f0\" ) }";
+ }
+};
+
+class OidInvalidChar : public Bad {
+ virtual string json() const {
+ return "{ \"_id\" : { \"$oid\" : \"00000000000Z000000000000\" } }";
+ }
+};
+
+class Oid2InvalidChar : public Bad {
+ virtual string json() const {
+ return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0fZf0f0f0f0f0f\" ) }";
+ }
+};
+
+class StringId : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("_id", "000000000000000000000000");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"_id\" : \"000000000000000000000000\" }";
+ }
+};
+
+class BinData : public Base {
+ virtual BSONObj bson() const {
+ char z[3];
+ z[0] = 'a';
+ z[1] = 'b';
+ z[2] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData("a", 3, BinDataGeneral, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"00\" } }";
+ }
+};
+
+class BinData1 : public Base {
+ virtual BSONObj bson() const {
+ char z[3];
+ z[0] = 'a';
+ z[1] = 'b';
+ z[2] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData("a", 3, Function, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"01\" } }";
+ }
+};
+
+class BinData2 : public Base {
+ virtual BSONObj bson() const {
+ char z[3];
+ z[0] = 'a';
+ z[1] = 'b';
+ z[2] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData("a", 3, ByteArrayDeprecated, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"02\" } }";
+ }
+};
+
+class BinData3 : public Base {
+ virtual BSONObj bson() const {
+ char z[3];
+ z[0] = 'a';
+ z[1] = 'b';
+ z[2] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData("a", 3, bdtUUID, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"03\" } }";
+ }
+};
+
+class BinData4 : public Base {
+ virtual BSONObj bson() const {
+ char z[3];
+ z[0] = 'a';
+ z[1] = 'b';
+ z[2] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData("a", 3, newUUID, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"04\" } }";
+ }
+};
+
+class BinData5 : public Base {
+ virtual BSONObj bson() const {
+ char z[3];
+ z[0] = 'a';
+ z[1] = 'b';
+ z[2] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData("a", 3, MD5Type, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"05\" } }";
+ }
+};
+
+class BinData80 : public Base {
+ virtual BSONObj bson() const {
+ char z[3];
+ z[0] = 'a';
+ z[1] = 'b';
+ z[2] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData("a", 3, bdtCustom, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"80\" } }";
+ }
+};
+
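+// "YWI=" and "YQ==" below are base64 for "ab" and "a", exercising one and two
+// characters of '=' padding respectively.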
+class BinDataPaddedSingle : public Base {
+ virtual BSONObj bson() const {
+ char z[2];
+ z[0] = 'a';
+ z[1] = 'b';
+ BSONObjBuilder b;
+ b.appendBinData("a", 2, BinDataGeneral, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"00\" } }";
+ }
+};
+
+class BinDataPaddedDouble : public Base {
+ virtual BSONObj bson() const {
+ char z[1];
+ z[0] = 'a';
+ BSONObjBuilder b;
+ b.appendBinData("a", 1, BinDataGeneral, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"00\" } }";
+ }
+};
+
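+// Base64-encoding the 48 bytes below yields the complete 64-character base64
+// alphabet: A-Z, a-z, 0-9, '+' and '/'.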
+class BinDataAllChars : public Base {
+ virtual BSONObj bson() const {
+ unsigned char z[] = {0x00, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
+ 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
+ 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
+ 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
+ 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF};
+ BSONObjBuilder b;
+ b.appendBinData("a", 48, BinDataGeneral, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : "
+ "\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\", \"$type\" : "
+ "\"00\" } }";
+ }
+};
+
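+// The following cases exercise invalid base64: the encoded length must be a
+// multiple of four, with at most two trailing '=' padding characters and no
+// characters outside the base64 alphabet.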
+class BinDataBadLength : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YQ=\", \"$type\" : \"00\" } }";
+ }
+};
+
+class BinDataBadLength1 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YQ\", \"$type\" : \"00\" } }";
+ }
+};
+
+class BinDataBadLength2 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YQX==\", \"$type\" : \"00\" } }";
+ }
+};
+
+class BinDataBadLength3 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YQX\", \"$type\" : \"00\" } }";
+ }
+};
+
+class BinDataBadLength4 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YQXZ=\", \"$type\" : \"00\" } }";
+ }
+};
+
+class BinDataBadLength5 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YQXZ==\", \"$type\" : \"00\" } }";
+ }
+};
+
+class BinDataBadChars : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"a...\", \"$type\" : \"00\" } }";
+ }
+};
+
+class BinDataTypeTooShort : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"0\" } }";
+ }
+};
+
+class BinDataTypeTooLong : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"000\" } }";
+ }
+};
+
+class BinDataTypeBadChars : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"ZZ\" } }";
+ }
+};
+
+class BinDataEmptyType : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"\" } }";
+ }
+};
+
+class BinDataNoType : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"AAAA\" } }";
+ }
+};
+
+class BinDataInvalidType : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"AAAA\", \"$type\" : \"100\" } }";
+ }
+};
+
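+// appendDate() takes a count of milliseconds since the Unix epoch, which the
+// strict format reproduces verbatim under "$date".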
+class Date : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", 0);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$date\" : 0 } }";
+ }
+};
+
+class DateNegZero : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", -0);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$date\" : -0 } }";
+ }
+};
+
+class DateNonzero : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", 1000000000);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$date\" : 1000000000 } }";
+ }
+};
+
+class DateStrictTooLong : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$date\" : " << ~(0ULL) << "1"
+ << " } }";
+ return ss.str();
+ }
+};
+
+class DateTooLong : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : Date( " << ~(0ULL) << "1"
+ << " ) }";
+ return ss.str();
+ }
+};
+
+class DateIsString : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$date\" : \"100\" } }";
+ return ss.str();
+ }
+};
+
+class DateIsString1 : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : Date(\"a\") }";
+ return ss.str();
+ }
+};
+
+class DateIsString2 : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : new Date(\"a\") }";
+ return ss.str();
+ }
+};
+
+class DateIsFloat : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$date\" : 1.1 } }";
+ return ss.str();
+ }
+};
+
+class DateIsFloat1 : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : Date(1.1) }";
+ return ss.str();
+ }
+};
+
+class DateIsFloat2 : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : new Date(1.1) }";
+ return ss.str();
+ }
+};
+
+class DateIsExponent : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$date\" : 10e3 } }";
+ return ss.str();
+ }
+};
+
+class DateIsExponent1 : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : Date(10e3) }";
+ return ss.str();
+ }
+};
+
+class DateIsExponent2 : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : new Date(10e3) }";
+ return ss.str();
+ }
+};
+
+/* Need to handle this because jsonString outputs the value of Date_t as
+ * unsigned. See SERVER-8330 and SERVER-8573. */
+class DateStrictMaxUnsigned : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", -1);
+ return b.obj();
+ }
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$date\" : " << std::numeric_limits<unsigned long long>::max()
+ << " } }";
+ return ss.str();
+ }
+};
+
+class DateMaxUnsigned : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", -1);
+ return b.obj();
+ }
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : Date( " << std::numeric_limits<unsigned long long>::max() << " ) }";
+ return ss.str();
+ }
+};
+
+class DateStrictNegative : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", -1);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$date\" : -1 } }";
+ }
+};
+
+class DateNegative : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", -1);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : Date( -1 ) }";
+ }
+};
+
+class NumberLongTest : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNumber("a", 20000LL);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : NumberLong( 20000 ) }";
+ }
+};
+
+class NumberLongMin : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNumber("a", std::numeric_limits<long long>::min());
+ return b.obj();
+ }
+ virtual string json() const {
+ std::stringstream ss;
+ ss << "{'a': NumberLong(";
+ ss << std::numeric_limits<long long>::min();
+ ss << ") }";
+ return ss.str();
+ }
+};
+
+class NumberIntTest : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNumber("a", 20000);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : NumberInt( 20000 ) }";
+ }
+};
+
+class NumberLongNeg : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNumber("a", -20000LL);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : NumberLong( -20000 ) }";
+ }
+};
+
+class NumberIntNeg : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNumber("a", -20000);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : NumberInt( -20000 ) }";
+ }
+};
+
+class NumberLongBad : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : NumberLong( 'sdf' ) }";
+ }
+};
+
+class NumberIntBad : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : NumberInt( 'sdf' ) }";
+ }
+};
+
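+// Note: this legacy appendTimestamp() overload takes the time portion in
+// milliseconds, so the 20000 below round-trips as Timestamp( 20, 5 )
+// (seconds, increment), as the expected JSON confirms.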
+class Timestamp : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendTimestamp("a", (unsigned long long)20000, 5);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : Timestamp( 20, 5 ) }";
+ }
+};
+
+class TimestampNoIncrement : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : Timestamp( 20 ) }";
+ }
+};
+
+class TimestampZero : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendTimestamp("a", 0ULL, 0);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : Timestamp( 0, 0 ) }";
+ }
+};
+
+class TimestampNoArgs : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : Timestamp() }";
+ }
+};
+
+class TimestampFloatSeconds : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : Timestamp( 20.0, 1 ) }";
+ }
+};
+
+class TimestampFloatIncrement : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : Timestamp( 20, 1.0 ) }";
+ }
+};
+
+class TimestampNegativeSeconds : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : Timestamp( -20, 5 ) }";
+ }
+};
+
+class TimestampNegativeIncrement : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : Timestamp( 20, -5 ) }";
+ }
+};
+
+class TimestampInvalidSeconds : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : Timestamp( q, 5 ) }";
+ }
+};
+
+class TimestampObject : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendTimestamp("a", (unsigned long long)20000, 5);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { \"t\" : 20 , \"i\" : 5 } } }";
+ }
+};
+
+class TimestampObjectInvalidFieldName : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { \"time\" : 20 , \"increment\" : 5 } } }";
+ }
+};
+
+class TimestampObjectNoIncrement : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { \"t\" : 20 } } }";
+ }
+};
+
+class TimestampObjectNegativeSeconds : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { \"t\" : -20 , \"i\" : 5 } } }";
+ }
+};
+
+class TimestampObjectNegativeIncrement : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { \"t\" : 20 , \"i\" : -5 } } }";
+ }
+};
+
+class TimestampObjectInvalidSeconds : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { \"t\" : q , \"i\" : 5 } } }";
+ }
+};
+
+class TimestampObjectZero : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendTimestamp("a", 0ULL, 0);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { \"t\" : 0, \"i\" : 0} } }";
+ }
+};
+
+class TimestampObjectNoArgs : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { } } }";
+ }
+};
+
+class TimestampObjectFloatSeconds : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { \"t\" : 1.0, \"i\" : 0} } }";
+ }
+};
+
+class TimestampObjectFloatIncrement : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$timestamp\" : { \"t\" : 20, \"i\" : 1.0} } }";
+ }
+};
+
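+// The regex cases cover both the { "$regex", "$options" } document form and
+// the /pattern/flags literal form; only a restricted set of option letters
+// (such as "i") is accepted, as the Bad cases below demonstrate.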
+class Regex : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "b", "i");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"i\" } }";
+ }
+};
+
+class RegexNoOptionField : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "b", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\" } }";
+ }
+};
+
+class RegexEscape : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "\t", "i");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"\\t\", \"$options\" : \"i\" } }";
+ }
+};
+
+class RegexWithQuotes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "\"", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : /\"/ }";
+ }
+};
+
+class RegexWithQuotes1 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "\"", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { $regex : \"\\\"\" }}";
+ }
+};
+
+class RegexInvalidField : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\", \"field\" : \"i\" } }";
+ }
+};
+
+class RegexInvalidOption : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"1\" } }";
+ }
+};
+
+class RegexInvalidOption2 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : /b/c }";
+ }
+};
+
+class RegexInvalidOption3 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : /b/ic }";
+ }
+};
+
+class RegexInvalidOption4 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"a\" } }";
+ }
+};
+
+class RegexInvalidOption5 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : /b/a }";
+ }
+};
+
+class RegexEmptyOption : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "b", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"\" } }";
+ }
+};
+
+class RegexEmpty : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"\", \"$options\" : \"\"} }";
+ }
+};
+
+class RegexEmpty1 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : // }";
+ }
+};
+
+class RegexOverlap : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : // } }";
+ }
+};
+
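+// Each Malformed case below feeds structurally invalid JSON to fromjson() and
+// expects the parse to fail.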
+class Malformed : public Bad {
+ string json() const {
+ return "{";
+ }
+};
+
+class Malformed1 : public Bad {
+ string json() const {
+ return "}";
+ }
+};
+
+class Malformed2 : public Bad {
+ string json() const {
+ return "{test}";
+ }
+};
+
+class Malformed3 : public Bad {
+ string json() const {
+ return "{test";
+ }
+};
+
+class Malformed4 : public Bad {
+ string json() const {
+ return "{ test : 1";
+ }
+};
+
+class Malformed5 : public Bad {
+ string json() const {
+ return "{ test : 1 , }";
+ }
+};
+
+class Malformed6 : public Bad {
+ string json() const {
+ return "{ test : 1 , tst}";
+ }
+};
+
+class Malformed7 : public Bad {
+ string json() const {
+ return "{ a : []";
+ }
+};
+
+class Malformed8 : public Bad {
+ string json() const {
+ return "{ a : { test : 1 }";
+ }
+};
+
+class Malformed9 : public Bad {
+ string json() const {
+ return "{ a : [ { test : 1]}";
+ }
+};
+
+class Malformed10 : public Bad {
+ string json() const {
+ return "{ a : [ { test : 1], b : 2}";
+ }
+};
+
+class Malformed11 : public Bad {
+ string json() const {
+ return "{ a : \"test\"string }";
+ }
+};
+
+class Malformed12 : public Bad {
+ string json() const {
+ return "{ a : test\"string\" }";
+ }
+};
+
+class Malformed13 : public Bad {
+ string json() const {
+ return "{ a\"bad\" : \"teststring\" }";
+ }
+};
+
+class Malformed14 : public Bad {
+ string json() const {
+ return "{ \"a\"test : \"teststring\" }";
+ }
+};
+
+class Malformed15 : public Bad {
+ string json() const {
+ return "{ \"atest : \"teststring\" }";
+ }
+};
+
+class Malformed16 : public Bad {
+ string json() const {
+ return "{ atest\" : \"teststring\" }";
+ }
+};
+
+class Malformed17 : public Bad {
+ string json() const {
+ return "{ atest\" : 1 }";
+ }
+};
+
+class Malformed18 : public Bad {
+ string json() const {
+ return "{ atest : \"teststring }";
+ }
+};
+
+class Malformed19 : public Bad {
+ string json() const {
+ return "{ atest : teststring\" }";
+ }
+};
+
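+// Unquoted field names must be identifier-like: the Bad cases below show that
+// leading digits, '-', '.', embedded control characters, and non-ASCII bytes
+// are all rejected.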
+class UnquotedFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a_b", 1);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ a_b : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad : public Bad {
+ string json() const {
+ return "{ 123 : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad1 : public Bad {
+ string json() const {
+ return "{ -123 : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad2 : public Bad {
+ string json() const {
+ return "{ .123 : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad3 : public Bad {
+ string json() const {
+ return "{ -.123 : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad4 : public Bad {
+ string json() const {
+ return "{ -1.23 : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad5 : public Bad {
+ string json() const {
+ return "{ 1e23 : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad6 : public Bad {
+ string json() const {
+ return "{ -1e23 : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad7 : public Bad {
+ string json() const {
+ return "{ -1e-23 : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad8 : public Bad {
+ string json() const {
+ return "{ -hello : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad9 : public Bad {
+ string json() const {
+ return "{ il.legal : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad10 : public Bad {
+ string json() const {
+ return "{ 10gen : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad11 : public Bad {
+ string json() const {
+ return "{ _123. : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad12 : public Bad {
+ string json() const {
+ return "{ he-llo : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad13 : public Bad {
+ string json() const {
+ return "{ bad\nchar : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad14 : public Bad {
+ string json() const {
+ return "{ thiswill\fail : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad15 : public Bad {
+ string json() const {
+ return "{ failu\re : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad16 : public Bad {
+ string json() const {
+ return "{ t\test : 1 }";
+ }
+};
+
+class UnquotedFieldNameBad17 : public Bad {
+ string json() const {
+ return "{ \break: 1 }";
+ }
+};
+
+class UnquotedFieldNameBad18 : public Bad {
+ string json() const {
+ // Here we fill the memory directly to test Unicode values;
+ // in this case we set \u0700 and \uFF00.
+ // Writing the bytes directly avoids MSVC error C4566.
+ unsigned char u[6];
+ u[0] = 0xdc;
+ u[1] = 0x80;
+
+ u[2] = 0xef;
+ u[3] = 0xbc;
+ u[4] = 0x80;
+
+ u[5] = 0;
+ std::stringstream ss;
+ ss << "{ " << u << " : 1 }";
+ return ss.str();
+ }
+};
+
+class UnquotedFieldNameBad19 : public Bad {
+ string json() const {
+ return "{ bl\\u3333p: 1 }";
+ }
+};
+
+class UnquotedFieldNameBad20 : public Bad {
+ string json() const {
+ return "{ bl-33p: 1 }";
+ }
+};
+
+class UnquotedFieldNameDollar : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("$a_b", 1);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ $a_b : 1 }";
+ }
+};
+
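+// Unlike strict JSON, the parser accepts single-quoted strings for both field
+// names and values, as the following cases verify.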
+class SingleQuotes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("ab'c\"", "bb\b '\"");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ 'ab\\'c\"' : 'bb\\b \\'\"' }";
+ }
+};
+
+class QuoteTest : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("\"", "test");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ '\"' : \"test\" }";
+ }
+};
+
+class QuoteTest1 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("'", "test");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"'\" : \"test\" }";
+ }
+};
+
+class QuoteTest2 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("\"", "test");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ '\"' : \"test\" }";
+ }
+};
+
+class QuoteTest3 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("\"'\"", "test");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ '\"\\\'\"' : \"test\" }";
+ }
+};
+
+class QuoteTest4 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("'\"'", "test");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"'\\\"'\" : \"test\" }";
+ }
+};
+
+class QuoteTest5 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("test", "'");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"test\" : \"'\" }";
+ }
+};
+
+class QuoteTest6 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("test", "\"");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"test\" : '\"' }";
+ }
+};
+
+class ObjectId : public Base {
+ virtual BSONObj bson() const {
+ OID id;
+ id.init("deadbeeff00ddeadbeeff00d");
+ BSONObjBuilder b;
+ b.appendOID("_id", &id);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"_id\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
+ }
+};
+
+class ObjectId2 : public Base {
+ virtual BSONObj bson() const {
+ OID id;
+ id.init("deadbeeff00ddeadbeeff00d");
+ BSONObjBuilder b;
+ b.appendOID("foo", &id);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"foo\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
+ }
+};
+
+class NumericTypes : public Base {
+public:
+ void run() {
+ Base::run();
+
+ BSONObj o = fromjson(json());
+
+ ASSERT(o["int"].type() == NumberInt);
+ ASSERT(o["long"].type() == NumberLong);
+ ASSERT(o["double"].type() == NumberDouble);
+
+ ASSERT(o["long"].numberLong() == 9223372036854775807ll);
+ }
+
+ virtual BSONObj bson() const {
+ return BSON("int" << 123 << "long" << 9223372036854775807ll // 2**63 - 1
+ << "double" << 3.14);
+ }
+ virtual string json() const {
+ return "{ \"int\": 123, \"long\": 9223372036854775807, \"double\": 3.14 }";
+ }
+};
+
+class NumericTypesJS : public Base {
+public:
+ void run() {
+ Base::run();
+
+ BSONObj o = fromjson(json());
+
+ ASSERT(o["int"].type() == NumberInt);
+ ASSERT(o["long"].type() == NumberLong);
+ ASSERT(o["double"].type() == NumberDouble);
+
+ ASSERT(o["long"].numberLong() == 9223372036854775807ll);
+ }
+
+ virtual BSONObj bson() const {
+ return BSON("int" << 123 << "long" << 9223372036854775807ll // 2**63 - 1
+ << "double" << 3.14);
+ }
+ virtual string json() const {
+ return "{ 'int': NumberInt(123), "
+ "'long': NumberLong(9223372036854775807), "
+ "'double': 3.14 }";
+ }
+};
+
+class NumericLongMin : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNumber("a", std::numeric_limits<long long>::min());
+ return b.obj();
+ }
+ virtual string json() const {
+ std::stringstream ss;
+ ss << "{'a': ";
+ ss << std::numeric_limits<long long>::min();
+ ss << " }";
+ return ss.str();
+ }
+};
+
+class NumericIntMin : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNumber("a", std::numeric_limits<int>::min());
+ return b.obj();
+ }
+ virtual string json() const {
+ std::stringstream ss;
+ ss << "{'a': ";
+ ss << std::numeric_limits<int>::min();
+ ss << " }";
+ return ss.str();
+ }
+};
+
+
+class NumericLimits : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder builder;
+ BSONArrayBuilder numArray(builder.subarrayStart(""));
+ numArray.append(std::numeric_limits<long long>::max());
+ numArray.append(std::numeric_limits<long long>::min());
+ numArray.append(std::numeric_limits<int>::max());
+ numArray.append(std::numeric_limits<int>::min());
+ numArray.done();
+ return builder.obj();
+ }
+ virtual string json() const {
+ std::stringstream ss;
+ ss << "{'': [";
+ ss << std::numeric_limits<long long>::max() << ",";
+ ss << std::numeric_limits<long long>::min() << ",";
+ ss << std::numeric_limits<int>::max() << ",";
+ ss << std::numeric_limits<int>::min();
+ ss << "] }";
+ return ss.str();
+ }
+};
+
+// Overflows double by giving it an exponent that is too large
+class NumericLimitsBad : public Bad {
+ virtual string json() const {
+ std::stringstream ss;
+ ss << "{ test : ";
+ ss << std::numeric_limits<double>::max() << "1111111111";
+ ss << "}";
+ return ss.str();
+ }
+};
+
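+// Similarly out of range: the appended digits push the negative exponent far
+// below what a double can represent.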
+class NumericLimitsBad1 : public Bad {
+ virtual string json() const {
+ std::stringstream ss;
+ ss << "{ test : ";
+ ss << std::numeric_limits<double>::min() << "11111111111";
+ ss << "}";
+ return ss.str();
+ }
+};
+
+class NegativeNumericTypes : public Base {
+public:
+ void run() {
+ Base::run();
+
+ BSONObj o = fromjson(json());
+
+ ASSERT(o["int"].type() == NumberInt);
+ ASSERT(o["long"].type() == NumberLong);
+ ASSERT(o["double"].type() == NumberDouble);
+
+ ASSERT(o["long"].numberLong() == -9223372036854775807ll);
+ }
+
+ virtual BSONObj bson() const {
+ return BSON("int" << -123 << "long" << -9223372036854775807ll // -1 * (2**63 - 1)
+ << "double" << -3.14);
+ }
+ virtual string json() const {
+ return "{ \"int\": -123, \"long\": -9223372036854775807, \"double\": -3.14 }";
+ }
+};
+
+class EmbeddedDatesBase : public Base {
+public:
+ virtual void run() {
+ BSONObj o = fromjson(json());
+ ASSERT_EQUALS(3, (o["time.valid"].type()));  // 3 == Object (BSON type code)
+ BSONObj e = o["time.valid"].embeddedObjectUserCheck();
+ ASSERT_EQUALS(9, e["$gt"].type());  // 9 == Date
+ ASSERT_EQUALS(9, e["$lt"].type());  // 9 == Date
+ Base::run();
+ }
+
+ BSONObj bson() const {
+ BSONObjBuilder e;
+ e.appendDate("$gt", 1257829200000LL);
+ e.appendDate("$lt", 1257829200100LL);
+
+ BSONObjBuilder b;
+ b.append("time.valid", e.obj());
+ return b.obj();
+ }
+ virtual string json() const = 0;
+};
+
+struct EmbeddedDatesFormat1 : EmbeddedDatesBase {
+ string json() const {
+ return "{ \"time.valid\" : { $gt : { \"$date\" : 1257829200000 } , $lt : { \"$date\" : "
+ "1257829200100 } } }";
+ }
+};
+struct EmbeddedDatesFormat2 : EmbeddedDatesBase {
+ string json() const {
+ return "{ \"time.valid\" : { $gt : Date(1257829200000) , $lt : Date( 1257829200100 ) } }";
+ }
+};
+struct EmbeddedDatesFormat3 : EmbeddedDatesBase {
+ string json() const {
+ return "{ \"time.valid\" : { $gt : new Date(1257829200000) , $lt : new Date( 1257829200100 "
+ ") } }";
+ }
+};
+
+class NullString : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("x", "a\0b", 4);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"x\" : \"a\\u0000b\" }";
+ }
+};
+
+class NullFieldUnquoted : public Bad {
+ virtual string json() const {
+ return "{ x\\u0000y : \"a\" }";
+ }
+};
+
+class MinKeyAlone : public Bad {
+ virtual string json() const {
+ return "{ \"$minKey\" : 1 }";
+ }
+};
+
+class MaxKeyAlone : public Bad {
+ virtual string json() const {
+ return "{ \"$maxKey\" : 1 }";
+ }
+};
+
+class MinKey : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendMinKey("a");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$minKey\" : 1 } }";
+ }
+};
+
+class MaxKey : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendMaxKey("a");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$maxKey\" : 1 } }";
+ }
+};
+
+} // namespace FromJsonTests
+
+class All : public Suite {
+public:
+ All() : Suite("json") {}
+
+ void setupTests() {
+ add<JsonStringTests::Empty>();
+ add<JsonStringTests::SingleStringMember>();
+ add<JsonStringTests::EscapedCharacters>();
+ add<JsonStringTests::AdditionalControlCharacters>();
+ add<JsonStringTests::ExtendedAscii>();
+ add<JsonStringTests::EscapeFieldName>();
+ add<JsonStringTests::SingleIntMember>();
+ add<JsonStringTests::SingleNumberMember>();
+ add<JsonStringTests::InvalidNumbers>();
+ add<JsonStringTests::NumberPrecision>();
+ add<JsonStringTests::NegativeNumber>();
+ add<JsonStringTests::NumberLongStrict>();
+ add<JsonStringTests::NumberLongStrictLarge>();
+ add<JsonStringTests::NumberLongStrictNegative>();
+ add<JsonStringTests::NumberDoubleNaN>();
+ add<JsonStringTests::NumberDoubleInfinity>();
+ add<JsonStringTests::NumberDoubleNegativeInfinity>();
+ add<JsonStringTests::SingleBoolMember>();
+ add<JsonStringTests::SingleNullMember>();
+ add<JsonStringTests::SingleUndefinedMember>();
+ add<JsonStringTests::SingleObjectMember>();
+ add<JsonStringTests::TwoMembers>();
+ add<JsonStringTests::EmptyArray>();
+ add<JsonStringTests::Array>();
+ add<JsonStringTests::DBRef>();
+ add<JsonStringTests::DBRefZero>();
+ add<JsonStringTests::ObjectId>();
+ add<JsonStringTests::BinData>();
+ add<JsonStringTests::Symbol>();
+ add<JsonStringTests::Date>();
+ add<JsonStringTests::DateNegative>();
+ add<JsonStringTests::Regex>();
+ add<JsonStringTests::RegexEscape>();
+ add<JsonStringTests::RegexManyOptions>();
+ add<JsonStringTests::CodeTests>();
+ add<JsonStringTests::CodeWScopeTests>();
+ add<JsonStringTests::TimestampTests>();
+ add<JsonStringTests::NullString>();
+ add<JsonStringTests::AllTypes>();
+
+ add<FromJsonTests::Empty>();
+ add<FromJsonTests::EmptyWithSpace>();
+ add<FromJsonTests::SingleString>();
+ add<FromJsonTests::EmptyStrings>();
+ add<FromJsonTests::ReservedFieldName>();
+ add<FromJsonTests::ReservedFieldName1>();
+ add<FromJsonTests::NumberFieldName>();
+ add<FromJsonTests::InvalidFieldName>();
+ add<FromJsonTests::QuotedNullName>();
+ add<FromJsonTests::NoValue>();
+ add<FromJsonTests::InvalidValue>();
+ add<FromJsonTests::OkDollarFieldName>();
+ add<FromJsonTests::SingleNumber>();
+ add<FromJsonTests::RealNumber>();
+ add<FromJsonTests::FancyNumber>();
+ add<FromJsonTests::TwoElements>();
+ add<FromJsonTests::Subobject>();
+ add<FromJsonTests::DeeplyNestedObject>();
+ add<FromJsonTests::ArrayEmpty>();
+ add<FromJsonTests::TopLevelArrayEmpty>();
+ add<FromJsonTests::TopLevelArray>();
+ add<FromJsonTests::Array>();
+ add<FromJsonTests::True>();
+ add<FromJsonTests::False>();
+ add<FromJsonTests::Null>();
+ add<FromJsonTests::Undefined>();
+ add<FromJsonTests::UndefinedStrict>();
+ add<FromJsonTests::UndefinedStrictBad>();
+ add<FromJsonTests::EscapedCharacters>();
+ add<FromJsonTests::NonEscapedCharacters>();
+ add<FromJsonTests::AllowedControlCharacter>();
+ add<FromJsonTests::InvalidControlCharacter>();
+ add<FromJsonTests::NumbersInFieldName>();
+ add<FromJsonTests::EscapeFieldName>();
+ add<FromJsonTests::EscapedUnicodeToUtf8>();
+ add<FromJsonTests::Utf8AllOnes>();
+ add<FromJsonTests::Utf8FirstByteOnes>();
+ add<FromJsonTests::Utf8Invalid>();
+ add<FromJsonTests::Utf8TooShort>();
+ add<FromJsonTests::DBRefConstructor>();
+ add<FromJsonTests::DBRefConstructorCapitals>();
+ add<FromJsonTests::DBRefConstructorDbName>();
+ add<FromJsonTests::DBRefConstructorNumber>();
+ add<FromJsonTests::DBRefConstructorObject>();
+ add<FromJsonTests::DBRefNumberId>();
+ add<FromJsonTests::DBRefObjectAsId>();
+ add<FromJsonTests::DBRefStringId>();
+ add<FromJsonTests::DBRefObjectIDObject>();
+ add<FromJsonTests::DBRefObjectIDConstructor>();
+ add<FromJsonTests::DBRefDbName>();
+ add<FromJsonTests::Oid>();
+ add<FromJsonTests::Oid2>();
+ add<FromJsonTests::OidTooLong>();
+ add<FromJsonTests::Oid2TooLong>();
+ add<FromJsonTests::OidTooShort>();
+ add<FromJsonTests::Oid2TooShort>();
+ add<FromJsonTests::OidInvalidChar>();
+ add<FromJsonTests::Oid2InvalidChar>();
+ add<FromJsonTests::StringId>();
+ add<FromJsonTests::BinData>();
+ add<FromJsonTests::BinData1>();
+ add<FromJsonTests::BinData2>();
+ add<FromJsonTests::BinData3>();
+ add<FromJsonTests::BinData4>();
+ add<FromJsonTests::BinData5>();
+ add<FromJsonTests::BinData80>();
+ add<FromJsonTests::BinDataPaddedSingle>();
+ add<FromJsonTests::BinDataPaddedDouble>();
+ add<FromJsonTests::BinDataAllChars>();
+ add<FromJsonTests::BinDataBadLength>();
+ add<FromJsonTests::BinDataBadLength1>();
+ add<FromJsonTests::BinDataBadLength2>();
+ add<FromJsonTests::BinDataBadLength3>();
+ add<FromJsonTests::BinDataBadLength4>();
+ add<FromJsonTests::BinDataBadLength5>();
+ add<FromJsonTests::BinDataBadChars>();
+ add<FromJsonTests::BinDataTypeTooShort>();
+ add<FromJsonTests::BinDataTypeTooLong>();
+ add<FromJsonTests::BinDataTypeBadChars>();
+ add<FromJsonTests::BinDataEmptyType>();
+ add<FromJsonTests::BinDataNoType>();
+ add<FromJsonTests::BinDataInvalidType>();
+ // DOCS-2539: We cannot parse dates generated with a Unix timestamp of zero in local
+ // time, since the body of the date may be before the Unix Epoch. This causes parsing
+ // to fail even if the offset would properly adjust the time. For example,
+ // "1969-12-31T19:00:00-05:00" actually represents the Unix timestamp of zero, but we
+ // cannot parse it because the body of the date is before 1970.
+ // add< FromJsonTests::Date >();
+ // add< FromJsonTests::DateNegZero >();
+ add<FromJsonTests::DateNonzero>();
+ add<FromJsonTests::DateStrictTooLong>();
+ add<FromJsonTests::DateTooLong>();
+ add<FromJsonTests::DateIsString>();
+ add<FromJsonTests::DateIsString1>();
+ add<FromJsonTests::DateIsString2>();
+ add<FromJsonTests::DateIsFloat>();
+ add<FromJsonTests::DateIsFloat1>();
+ add<FromJsonTests::DateIsFloat2>();
+ add<FromJsonTests::DateIsExponent>();
+ add<FromJsonTests::DateIsExponent1>();
+ add<FromJsonTests::DateIsExponent2>();
+ add<FromJsonTests::DateStrictMaxUnsigned>();
+ add<FromJsonTests::DateMaxUnsigned>();
+ add<FromJsonTests::DateStrictNegative>();
+ add<FromJsonTests::DateNegative>();
+ add<FromJsonTests::NumberLongTest>();
+ add<FromJsonTests::NumberLongMin>();
+ add<FromJsonTests::NumberIntTest>();
+ add<FromJsonTests::NumberLongNeg>();
+ add<FromJsonTests::NumberIntNeg>();
+ add<FromJsonTests::NumberLongBad>();
+ add<FromJsonTests::NumberIntBad>();
+ add<FromJsonTests::Timestamp>();
+ add<FromJsonTests::TimestampNoIncrement>();
+ add<FromJsonTests::TimestampZero>();
+ add<FromJsonTests::TimestampNoArgs>();
+ add<FromJsonTests::TimestampFloatSeconds>();
+ add<FromJsonTests::TimestampFloatIncrement>();
+ add<FromJsonTests::TimestampNegativeSeconds>();
+ add<FromJsonTests::TimestampNegativeIncrement>();
+ add<FromJsonTests::TimestampInvalidSeconds>();
+ add<FromJsonTests::TimestampObject>();
+ add<FromJsonTests::TimestampObjectInvalidFieldName>();
+ add<FromJsonTests::TimestampObjectNoIncrement>();
+ add<FromJsonTests::TimestampObjectNegativeSeconds>();
+ add<FromJsonTests::TimestampObjectNegativeIncrement>();
+ add<FromJsonTests::TimestampObjectInvalidSeconds>();
+ add<FromJsonTests::TimestampObjectZero>();
+ add<FromJsonTests::TimestampObjectNoArgs>();
+ add<FromJsonTests::TimestampObjectFloatSeconds>();
+ add<FromJsonTests::TimestampObjectFloatIncrement>();
+ add<FromJsonTests::Regex>();
+ add<FromJsonTests::RegexNoOptionField>();
+ add<FromJsonTests::RegexEscape>();
+ add<FromJsonTests::RegexWithQuotes>();
+ add<FromJsonTests::RegexWithQuotes1>();
+ add<FromJsonTests::RegexInvalidField>();
+ add<FromJsonTests::RegexInvalidOption>();
+ add<FromJsonTests::RegexInvalidOption2>();
+ add<FromJsonTests::RegexInvalidOption3>();
+ add<FromJsonTests::RegexInvalidOption4>();
+ add<FromJsonTests::RegexInvalidOption5>();
+ add<FromJsonTests::RegexEmptyOption>();
+ add<FromJsonTests::RegexEmpty>();
+ add<FromJsonTests::RegexEmpty1>();
+ add<FromJsonTests::RegexOverlap>();
+ add<FromJsonTests::Malformed>();
+ add<FromJsonTests::Malformed1>();
+ add<FromJsonTests::Malformed2>();
+ add<FromJsonTests::Malformed3>();
+ add<FromJsonTests::Malformed4>();
+ add<FromJsonTests::Malformed5>();
+ add<FromJsonTests::Malformed6>();
+ add<FromJsonTests::Malformed7>();
+ add<FromJsonTests::Malformed8>();
+ add<FromJsonTests::Malformed9>();
+ add<FromJsonTests::Malformed10>();
+ add<FromJsonTests::Malformed11>();
+ add<FromJsonTests::Malformed12>();
+ add<FromJsonTests::Malformed13>();
+ add<FromJsonTests::Malformed14>();
+ add<FromJsonTests::Malformed15>();
+ add<FromJsonTests::Malformed16>();
+ add<FromJsonTests::Malformed17>();
+ add<FromJsonTests::Malformed18>();
+ add<FromJsonTests::Malformed19>();
+ add<FromJsonTests::UnquotedFieldName>();
+ add<FromJsonTests::UnquotedFieldNameBad>();
+ add<FromJsonTests::UnquotedFieldNameBad1>();
+ add<FromJsonTests::UnquotedFieldNameBad2>();
+ add<FromJsonTests::UnquotedFieldNameBad3>();
+ add<FromJsonTests::UnquotedFieldNameBad4>();
+ add<FromJsonTests::UnquotedFieldNameBad5>();
+ add<FromJsonTests::UnquotedFieldNameBad6>();
+ add<FromJsonTests::UnquotedFieldNameBad7>();
+ add<FromJsonTests::UnquotedFieldNameBad8>();
+ add<FromJsonTests::UnquotedFieldNameBad9>();
+ add<FromJsonTests::UnquotedFieldNameBad10>();
+ add<FromJsonTests::UnquotedFieldNameBad11>();
+ add<FromJsonTests::UnquotedFieldNameBad12>();
+ add<FromJsonTests::UnquotedFieldNameBad13>();
+ add<FromJsonTests::UnquotedFieldNameBad14>();
+ add<FromJsonTests::UnquotedFieldNameBad15>();
+ add<FromJsonTests::UnquotedFieldNameBad16>();
+ add<FromJsonTests::UnquotedFieldNameBad17>();
+ add<FromJsonTests::UnquotedFieldNameBad18>();
+ add<FromJsonTests::UnquotedFieldNameBad19>();
+ add<FromJsonTests::UnquotedFieldNameBad20>();
+ add<FromJsonTests::UnquotedFieldNameDollar>();
+ add<FromJsonTests::SingleQuotes>();
+ add<FromJsonTests::QuoteTest>();
+ add<FromJsonTests::QuoteTest1>();
+ add<FromJsonTests::QuoteTest2>();
+ add<FromJsonTests::QuoteTest3>();
+ add<FromJsonTests::QuoteTest4>();
+ add<FromJsonTests::QuoteTest5>();
+ add<FromJsonTests::QuoteTest6>();
+ add<FromJsonTests::ObjectId>();
+ add<FromJsonTests::ObjectId2>();
+ add<FromJsonTests::NumericIntMin>();
+ add<FromJsonTests::NumericLongMin>();
+ add<FromJsonTests::NumericTypes>();
+ add<FromJsonTests::NumericTypesJS>();
+ add<FromJsonTests::NumericLimits>();
+ add<FromJsonTests::NumericLimitsBad>();
+ add<FromJsonTests::NumericLimitsBad1>();
+ add<FromJsonTests::NegativeNumericTypes>();
+ add<FromJsonTests::EmbeddedDatesFormat1>();
+ add<FromJsonTests::EmbeddedDatesFormat2>();
+ add<FromJsonTests::EmbeddedDatesFormat3>();
+ add<FromJsonTests::NullString>();
+ add<FromJsonTests::NullFieldUnquoted>();
+ add<FromJsonTests::MinKeyAlone>();
+ add<FromJsonTests::MaxKeyAlone>();
+ add<FromJsonTests::MinKey>();
+ add<FromJsonTests::MaxKey>();
+ }
+};
+
+SuiteInstance<All> myall;
+
+} // namespace JsonTests
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 67299420841..2842daffea3 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -57,2119 +57,2196 @@ using std::vector;
namespace JSTests {
- class BuiltinTests {
+class BuiltinTests {
+public:
+ void run() {
+ // Run any tests included with the scripting engine
+ globalScriptEngine->runTest();
+ }
+};
+
+class BasicScope {
+public:
+ void run() {
+ scoped_ptr<Scope> s;
+ s.reset(globalScriptEngine->newScope());
+
+ s->setNumber("x", 5);
+ ASSERT(5 == s->getNumber("x"));
+
+ s->setNumber("x", 1.67);
+ ASSERT(1.67 == s->getNumber("x"));
+
+ s->setString("s", "eliot was here");
+ ASSERT("eliot was here" == s->getString("s"));
+
+ s->setBoolean("b", true);
+ ASSERT(s->getBoolean("b"));
+
+ s->setBoolean("b", false);
+ ASSERT(!s->getBoolean("b"));
+ }
+};
+
+class ResetScope {
+public:
+ void run() {
+ /* Currently reset does not clear data in v8 or spidermonkey scopes. See SECURITY-10
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ s->setBoolean( "x" , true );
+ ASSERT( s->getBoolean( "x" ) );
+
+ s->reset();
+ ASSERT( !s->getBoolean( "x" ) );
+ */
+ }
+};
+
+class FalseTests {
+public:
+ void run() {
+ // Test falsy javascript values
+ scoped_ptr<Scope> s;
+ s.reset(globalScriptEngine->newScope());
+
+ ASSERT(!s->getBoolean("notSet"));
+
+ s->setString("emptyString", "");
+ ASSERT(!s->getBoolean("emptyString"));
+
+ s->setNumber("notANumberVal", std::numeric_limits<double>::quiet_NaN());
+ ASSERT(!s->getBoolean("notANumberVal"));
+
+ s->setElement("nullVal", BSONObjBuilder().appendNull("null").obj().getField("null"));
+ ASSERT(!s->getBoolean("nullVal"));
+
+ s->setNumber("zeroVal", 0);
+ ASSERT(!s->getBoolean("zeroVal"));
+ }
+};
+
+class SimpleFunctions {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
+
+ s->invoke("x=5;", 0, 0);
+ ASSERT(5 == s->getNumber("x"));
+
+ s->invoke("return 17;", 0, 0);
+ ASSERT(17 == s->getNumber("__returnValue"));
+
+ s->invoke("function(){ return 17; }", 0, 0);
+ ASSERT(17 == s->getNumber("__returnValue"));
+
+ s->setNumber("x", 1.76);
+ s->invoke("return x == 1.76; ", 0, 0);
+ ASSERT(s->getBoolean("__returnValue"));
+
+ s->setNumber("x", 1.76);
+ s->invoke("return x == 1.79; ", 0, 0);
+ ASSERT(!s->getBoolean("__returnValue"));
+
+ BSONObj obj = BSON("" << 11.0);
+ s->invoke("function( z ){ return 5 + z; }", &obj, 0);
+ ASSERT_EQUALS(16, s->getNumber("__returnValue"));
+
+ delete s;
+ }
+};
+
+/** Installs a tee for auditing log messages in the same thread. */
+class LogRecordingScope {
+public:
+ LogRecordingScope()
+ : _logged(false),
+ _threadName(mongo::getThreadName()),
+ _handle(mongo::logger::globalLogDomain()->attachAppender(
+ mongo::logger::MessageLogDomain::AppenderAutoPtr(new Tee(this)))) {}
+ ~LogRecordingScope() {
+ mongo::logger::globalLogDomain()->detachAppender(_handle);
+ }
+ /** @return true if a log message was recorded on this thread. */
+ bool logged() const {
+ return _logged;
+ }
+
+private:
+ class Tee : public mongo::logger::MessageLogDomain::EventAppender {
public:
- void run() {
- // Run any tests included with the scripting engine
- globalScriptEngine->runTest();
+ Tee(LogRecordingScope* scope) : _scope(scope) {}
+ virtual ~Tee() {}
+ virtual Status append(const logger::MessageEventEphemeral& event) {
+ // Don't want to consider logging by background threads.
+ if (mongo::getThreadName() == _scope->_threadName) {
+ _scope->_logged = true;
+ }
+ return Status::OK();
}
- };
-
- class BasicScope {
- public:
- void run() {
- scoped_ptr<Scope> s;
- s.reset( globalScriptEngine->newScope() );
-
- s->setNumber( "x" , 5 );
- ASSERT( 5 == s->getNumber( "x" ) );
-
- s->setNumber( "x" , 1.67 );
- ASSERT( 1.67 == s->getNumber( "x" ) );
-
- s->setString( "s" , "eliot was here" );
- ASSERT( "eliot was here" == s->getString( "s" ) );
-
- s->setBoolean( "b" , true );
- ASSERT( s->getBoolean( "b" ) );
-
- s->setBoolean( "b" , false );
- ASSERT( ! s->getBoolean( "b" ) );
- }
- };
-
- class ResetScope {
- public:
- void run() {
- /* Currently reset does not clear data in v8 or spidermonkey scopes. See SECURITY-10
- auto_ptr<Scope> s;
- s.reset( globalScriptEngine->newScope() );
-
- s->setBoolean( "x" , true );
- ASSERT( s->getBoolean( "x" ) );
-
- s->reset();
- ASSERT( !s->getBoolean( "x" ) );
- */
- }
- };
-
- class FalseTests {
- public:
- void run() {
- // Test falsy javascript values
- scoped_ptr<Scope> s;
- s.reset( globalScriptEngine->newScope() );
-
- ASSERT( ! s->getBoolean( "notSet" ) );
-
- s->setString( "emptyString" , "" );
- ASSERT( ! s->getBoolean( "emptyString" ) );
-
- s->setNumber( "notANumberVal" , std::numeric_limits<double>::quiet_NaN());
- ASSERT( ! s->getBoolean( "notANumberVal" ) );
-
- s->setElement( "nullVal" , BSONObjBuilder().appendNull("null").obj().getField("null") );
- ASSERT( ! s->getBoolean( "nullVal" ) );
-
- s->setNumber( "zeroVal" , 0 );
- ASSERT( ! s->getBoolean( "zeroVal" ) );
- }
- };
-
- class SimpleFunctions {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
-
- s->invoke( "x=5;" , 0, 0 );
- ASSERT( 5 == s->getNumber( "x" ) );
-
- s->invoke( "return 17;" , 0, 0 );
- ASSERT( 17 == s->getNumber( "__returnValue" ) );
-
- s->invoke( "function(){ return 17; }" , 0, 0 );
- ASSERT( 17 == s->getNumber( "__returnValue" ) );
-
- s->setNumber( "x" , 1.76 );
- s->invoke( "return x == 1.76; " , 0, 0 );
- ASSERT( s->getBoolean( "__returnValue" ) );
-
- s->setNumber( "x" , 1.76 );
- s->invoke( "return x == 1.79; " , 0, 0 );
- ASSERT( ! s->getBoolean( "__returnValue" ) );
- BSONObj obj = BSON( "" << 11.0 );
- s->invoke( "function( z ){ return 5 + z; }" , &obj, 0 );
- ASSERT_EQUALS( 16 , s->getNumber( "__returnValue" ) );
-
- delete s;
- }
- };
-
- /** Installs a tee for auditing log messages in the same thread. */
- class LogRecordingScope {
- public:
- LogRecordingScope() :
- _logged(false),
- _threadName(mongo::getThreadName()),
- _handle(mongo::logger::globalLogDomain()->attachAppender(
- mongo::logger::MessageLogDomain::AppenderAutoPtr(new Tee(this)))) {
- }
- ~LogRecordingScope() {
- mongo::logger::globalLogDomain()->detachAppender(_handle);
- }
- /** @return most recent log entry. */
- bool logged() const { return _logged; }
private:
- class Tee : public mongo::logger::MessageLogDomain::EventAppender {
- public:
- Tee(LogRecordingScope* scope) : _scope(scope) {}
- virtual ~Tee() {}
- virtual Status append(const logger::MessageEventEphemeral& event) {
- // Don't want to consider logging by background threads.
- if (mongo::getThreadName() == _scope->_threadName) {
- _scope->_logged = true;
- }
- return Status::OK();
- }
- private:
- LogRecordingScope* _scope;
- };
- bool _logged;
- const string _threadName;
- mongo::logger::MessageLogDomain::AppenderHandle _handle;
+ LogRecordingScope* _scope;
};
-
- /** Error logging in Scope::exec(). */
- class ExecLogError {
- public:
- void run() {
- Scope *scope = globalScriptEngine->newScope();
-
- // No error is logged when reportError == false.
- ASSERT( !scope->exec( "notAFunction()", "foo", false, false, false ) );
- ASSERT( !_logger.logged() );
-
- // No error is logged for a valid statement.
- ASSERT( scope->exec( "validStatement = true", "foo", false, true, false ) );
- ASSERT( !_logger.logged() );
-
- // An error is logged for an invalid statement when reportError == true.
- ASSERT( !scope->exec( "notAFunction()", "foo", false, true, false ) );
- ASSERT( _logger.logged() );
+ bool _logged;
+ const string _threadName;
+ mongo::logger::MessageLogDomain::AppenderHandle _handle;
+};
+
+/** Error logging in Scope::exec(). */
+class ExecLogError {
+public:
+ void run() {
+ Scope* scope = globalScriptEngine->newScope();
+
+ // No error is logged when reportError == false.
+ ASSERT(!scope->exec("notAFunction()", "foo", false, false, false));
+ ASSERT(!_logger.logged());
+
+ // No error is logged for a valid statement.
+ ASSERT(scope->exec("validStatement = true", "foo", false, true, false));
+ ASSERT(!_logger.logged());
+
+ // An error is logged for an invalid statement when reportError == true.
+ ASSERT(!scope->exec("notAFunction()", "foo", false, true, false));
+ ASSERT(_logger.logged());
+ }
+
+private:
+ LogRecordingScope _logger;
+};
+
+/** Error logging in Scope::invoke(). */
+class InvokeLogError {
+public:
+ void run() {
+ Scope* scope = globalScriptEngine->newScope();
+
+ // No error is logged for a valid statement.
+ ASSERT_EQUALS(0, scope->invoke("validStatement = true", 0, 0));
+ ASSERT(!_logger.logged());
+
+ // An error is logged for an invalid statement.
+ try {
+ scope->invoke("notAFunction()", 0, 0);
+ } catch (const DBException&) {
+ // ignore the exception; just test that we logged something
}
- private:
- LogRecordingScope _logger;
- };
-
- /** Error logging in Scope::invoke(). */
- class InvokeLogError {
- public:
- void run() {
- Scope *scope = globalScriptEngine->newScope();
-
- // No error is logged for a valid statement.
- ASSERT_EQUALS( 0, scope->invoke( "validStatement = true", 0, 0 ) );
- ASSERT( !_logger.logged() );
+ ASSERT(_logger.logged());
+ }
- // An error is logged for an invalid statement.
- try {
- scope->invoke( "notAFunction()", 0, 0 );
- }
- catch(const DBException&) {
- // ignore the exception; just test that we logged something
- }
- ASSERT( _logger.logged() );
- }
- private:
- LogRecordingScope _logger;
- };
+private:
+ LogRecordingScope _logger;
+};
- class ObjectMapping {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
+class ObjectMapping {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
- BSONObj o = BSON( "x" << 17.0 << "y" << "eliot" << "z" << "sara" );
- s->setObject( "blah" , o );
+ BSONObj o = BSON("x" << 17.0 << "y"
+ << "eliot"
+ << "z"
+ << "sara");
+ s->setObject("blah", o);
- s->invoke( "return blah.x;" , 0, 0 );
- ASSERT_EQUALS( 17 , s->getNumber( "__returnValue" ) );
- s->invoke( "return blah.y;" , 0, 0 );
- ASSERT_EQUALS( "eliot" , s->getString( "__returnValue" ) );
+ s->invoke("return blah.x;", 0, 0);
+ ASSERT_EQUALS(17, s->getNumber("__returnValue"));
+ s->invoke("return blah.y;", 0, 0);
+ ASSERT_EQUALS("eliot", s->getString("__returnValue"));
- s->invoke( "return this.z;" , 0, &o );
- ASSERT_EQUALS( "sara" , s->getString( "__returnValue" ) );
+ s->invoke("return this.z;", 0, &o);
+ ASSERT_EQUALS("sara", s->getString("__returnValue"));
- s->invoke( "return this.z == 'sara';" , 0, &o );
- ASSERT_EQUALS( true , s->getBoolean( "__returnValue" ) );
+ s->invoke("return this.z == 'sara';", 0, &o);
+ ASSERT_EQUALS(true, s->getBoolean("__returnValue"));
- s->invoke( "this.z == 'sara';" , 0, &o );
- ASSERT_EQUALS( true , s->getBoolean( "__returnValue" ) );
+ s->invoke("this.z == 'sara';", 0, &o);
+ ASSERT_EQUALS(true, s->getBoolean("__returnValue"));
- s->invoke( "this.z == 'asara';" , 0, &o );
- ASSERT_EQUALS( false , s->getBoolean( "__returnValue" ) );
+ s->invoke("this.z == 'asara';", 0, &o);
+ ASSERT_EQUALS(false, s->getBoolean("__returnValue"));
- s->invoke( "return this.x == 17;" , 0, &o );
- ASSERT_EQUALS( true , s->getBoolean( "__returnValue" ) );
+ s->invoke("return this.x == 17;", 0, &o);
+ ASSERT_EQUALS(true, s->getBoolean("__returnValue"));
- s->invoke( "return this.x == 18;" , 0, &o );
- ASSERT_EQUALS( false , s->getBoolean( "__returnValue" ) );
+ s->invoke("return this.x == 18;", 0, &o);
+ ASSERT_EQUALS(false, s->getBoolean("__returnValue"));
- s->invoke( "function(){ return this.x == 17; }" , 0, &o );
- ASSERT_EQUALS( true , s->getBoolean( "__returnValue" ) );
+ s->invoke("function(){ return this.x == 17; }", 0, &o);
+ ASSERT_EQUALS(true, s->getBoolean("__returnValue"));
- s->invoke( "function(){ return this.x == 18; }" , 0, &o );
- ASSERT_EQUALS( false , s->getBoolean( "__returnValue" ) );
+ s->invoke("function(){ return this.x == 18; }", 0, &o);
+ ASSERT_EQUALS(false, s->getBoolean("__returnValue"));
- s->invoke( "function (){ return this.x == 17; }" , 0, &o );
- ASSERT_EQUALS( true , s->getBoolean( "__returnValue" ) );
+ s->invoke("function (){ return this.x == 17; }", 0, &o);
+ ASSERT_EQUALS(true, s->getBoolean("__returnValue"));
- s->invoke( "function z(){ return this.x == 18; }" , 0, &o );
- ASSERT_EQUALS( false , s->getBoolean( "__returnValue" ) );
+ s->invoke("function z(){ return this.x == 18; }", 0, &o);
+ ASSERT_EQUALS(false, s->getBoolean("__returnValue"));
- s->invoke( "function (){ this.x == 17; }" , 0, &o );
- ASSERT_EQUALS( false , s->getBoolean( "__returnValue" ) );
+ s->invoke("function (){ this.x == 17; }", 0, &o);
+ ASSERT_EQUALS(false, s->getBoolean("__returnValue"));
- s->invoke( "function z(){ this.x == 18; }" , 0, &o );
- ASSERT_EQUALS( false , s->getBoolean( "__returnValue" ) );
+ s->invoke("function z(){ this.x == 18; }", 0, &o);
+ ASSERT_EQUALS(false, s->getBoolean("__returnValue"));
- s->invoke( "x = 5; for( ; x <10; x++){ a = 1; }" , 0, &o );
- ASSERT_EQUALS( 10 , s->getNumber( "x" ) );
+ s->invoke("x = 5; for( ; x <10; x++){ a = 1; }", 0, &o);
+ ASSERT_EQUALS(10, s->getNumber("x"));
- delete s;
- }
- };
+ delete s;
+ }
+};
- class ObjectDecoding {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
+class ObjectDecoding {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
- s->invoke( "z = { num : 1 };" , 0, 0 );
- BSONObj out = s->getObject( "z" );
- ASSERT_EQUALS( 1 , out["num"].number() );
- ASSERT_EQUALS( 1 , out.nFields() );
+ s->invoke("z = { num : 1 };", 0, 0);
+ BSONObj out = s->getObject("z");
+ ASSERT_EQUALS(1, out["num"].number());
+ ASSERT_EQUALS(1, out.nFields());
- s->invoke( "z = { x : 'eliot' };" , 0, 0 );
- out = s->getObject( "z" );
- ASSERT_EQUALS( (string)"eliot" , out["x"].valuestr() );
- ASSERT_EQUALS( 1 , out.nFields() );
+ s->invoke("z = { x : 'eliot' };", 0, 0);
+ out = s->getObject("z");
+ ASSERT_EQUALS((string) "eliot", out["x"].valuestr());
+ ASSERT_EQUALS(1, out.nFields());
- BSONObj o = BSON( "x" << 17 );
- s->setObject( "blah" , o );
- out = s->getObject( "blah" );
- ASSERT_EQUALS( 17 , out["x"].number() );
+ BSONObj o = BSON("x" << 17);
+ s->setObject("blah", o);
+ out = s->getObject("blah");
+ ASSERT_EQUALS(17, out["x"].number());
- delete s;
- }
- };
+ delete s;
+ }
+};
- class JSOIDTests {
- public:
- void run() {
+class JSOIDTests {
+public:
+ void run() {
#ifdef MOZJS
- Scope * s = globalScriptEngine->newScope();
+ Scope* s = globalScriptEngine->newScope();
- s->localConnect( "blah" );
+ s->localConnect("blah");
- s->invoke( "z = { _id : new ObjectId() , a : 123 };" , 0, 0 );
- BSONObj out = s->getObject( "z" );
- ASSERT_EQUALS( 123 , out["a"].number() );
- ASSERT_EQUALS( jstOID , out["_id"].type() );
+ s->invoke("z = { _id : new ObjectId() , a : 123 };", 0, 0);
+ BSONObj out = s->getObject("z");
+ ASSERT_EQUALS(123, out["a"].number());
+ ASSERT_EQUALS(jstOID, out["_id"].type());
- OID save = out["_id"].__oid();
+ OID save = out["_id"].__oid();
- s->setObject( "a" , out );
+ s->setObject("a", out);
- s->invoke( "y = { _id : a._id , a : 124 };" , 0, 0 );
- out = s->getObject( "y" );
- ASSERT_EQUALS( 124 , out["a"].number() );
- ASSERT_EQUALS( jstOID , out["_id"].type() );
- ASSERT_EQUALS( out["_id"].__oid().str() , save.str() );
+ s->invoke("y = { _id : a._id , a : 124 };", 0, 0);
+ out = s->getObject("y");
+ ASSERT_EQUALS(124, out["a"].number());
+ ASSERT_EQUALS(jstOID, out["_id"].type());
+ ASSERT_EQUALS(out["_id"].__oid().str(), save.str());
- s->invoke( "y = { _id : new ObjectId( a._id ) , a : 125 };" , 0, 0 );
- out = s->getObject( "y" );
- ASSERT_EQUALS( 125 , out["a"].number() );
- ASSERT_EQUALS( jstOID , out["_id"].type() );
- ASSERT_EQUALS( out["_id"].__oid().str() , save.str() );
+ s->invoke("y = { _id : new ObjectId( a._id ) , a : 125 };", 0, 0);
+ out = s->getObject("y");
+ ASSERT_EQUALS(125, out["a"].number());
+ ASSERT_EQUALS(jstOID, out["_id"].type());
+ ASSERT_EQUALS(out["_id"].__oid().str(), save.str());
- delete s;
+ delete s;
#endif
- }
- };
-
- class SetImplicit {
- public:
- void run() {
- Scope *s = globalScriptEngine->newScope();
-
- BSONObj o = BSON( "foo" << "bar" );
- s->setObject( "a.b", o );
- ASSERT( s->getObject( "a" ).isEmpty() );
-
- BSONObj o2 = BSONObj();
- s->setObject( "a", o2 );
- s->setObject( "a.b", o );
- ASSERT( s->getObject( "a" ).isEmpty() );
-
- o2 = fromjson( "{b:{}}" );
- s->setObject( "a", o2 );
- s->setObject( "a.b", o );
- ASSERT( !s->getObject( "a" ).isEmpty() );
- }
- };
-
- class ObjectModReadonlyTests {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
-
- BSONObj o = BSON( "x" << 17 << "y" << "eliot" << "z" << "sara" << "zz" << BSONObj() );
- s->setObject( "blah" , o , true );
-
- s->invoke( "blah.y = 'e'", 0, 0 );
- BSONObj out = s->getObject( "blah" );
- ASSERT( strlen( out["y"].valuestr() ) > 1 );
-
- s->invoke( "blah.a = 19;" , 0, 0 );
- out = s->getObject( "blah" );
- ASSERT( out["a"].eoo() );
-
- s->invoke( "blah.zz.a = 19;" , 0, 0 );
- out = s->getObject( "blah" );
- ASSERT( out["zz"].embeddedObject()["a"].eoo() );
-
- s->setObject( "blah.zz", BSON( "a" << 19 ) );
- out = s->getObject( "blah" );
- ASSERT( out["zz"].embeddedObject()["a"].eoo() );
-
- s->invoke( "delete blah['x']" , 0, 0 );
- out = s->getObject( "blah" );
- ASSERT( !out["x"].eoo() );
-
- // read-only object itself can be overwritten
- s->invoke( "blah = {}", 0, 0 );
- out = s->getObject( "blah" );
- ASSERT( out.isEmpty() );
-
- // test array - can't implement this in v8
-// o = fromjson( "{a:[1,2,3]}" );
-// s->setObject( "blah", o, true );
-// out = s->getObject( "blah" );
-// s->invoke( "blah.a[ 0 ] = 4;", BSONObj() );
-// s->invoke( "delete blah['a'][ 2 ];", BSONObj() );
-// out = s->getObject( "blah" );
-// ASSERT_EQUALS( 1.0, out[ "a" ].embeddedObject()[ 0 ].number() );
-// ASSERT_EQUALS( 3.0, out[ "a" ].embeddedObject()[ 2 ].number() );
-
- delete s;
- }
- };
-
- class OtherJSTypes {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
-
- {
- // date
- BSONObj o;
- {
- BSONObjBuilder b;
- b.appendDate( "d" , 123456789 );
- o = b.obj();
- }
- s->setObject( "x" , o );
-
- s->invoke( "return x.d.getTime() != 12;" , 0, 0 );
- ASSERT_EQUALS( true, s->getBoolean( "__returnValue" ) );
-
- s->invoke( "z = x.d.getTime();" , 0, 0 );
- ASSERT_EQUALS( 123456789 , s->getNumber( "z" ) );
-
- s->invoke( "z = { z : x.d }" , 0, 0 );
- BSONObj out = s->getObject( "z" );
- ASSERT( out["z"].type() == Date );
- }
-
- {
- // regex
- BSONObj o;
- {
- BSONObjBuilder b;
- b.appendRegex( "r" , "^a" , "i" );
- o = b.obj();
- }
- s->setObject( "x" , o );
-
- s->invoke( "z = x.r.test( 'b' );" , 0, 0 );
- ASSERT_EQUALS( false , s->getBoolean( "z" ) );
-
- s->invoke( "z = x.r.test( 'a' );" , 0, 0 );
- ASSERT_EQUALS( true , s->getBoolean( "z" ) );
-
- s->invoke( "z = x.r.test( 'ba' );" , 0, 0 );
- ASSERT_EQUALS( false , s->getBoolean( "z" ) );
-
- s->invoke( "z = { a : x.r };" , 0, 0 );
-
- BSONObj out = s->getObject("z");
- ASSERT_EQUALS( (string)"^a" , out["a"].regex() );
- ASSERT_EQUALS( (string)"i" , out["a"].regexFlags() );
-
- // This regex used to cause a segfault because x isn't a valid flag for a js RegExp.
- // Now it throws a JS exception.
- BSONObj invalidRegex = BSON_ARRAY(BSON("regex" << BSONRegEx("asdf", "x")));
- const char* code = "function (obj) {"
- " var threw = false;"
- " try {"
- " obj.regex;" // should throw
- " } catch(e) {"
- " threw = true;"
- " }"
- " assert(threw);"
- "}";
- ASSERT_EQUALS(s->invoke(code, &invalidRegex, NULL), 0);
- }
-
- // array
- {
- BSONObj o = fromjson( "{r:[1,2,3]}" );
- s->setObject( "x", o, false );
- BSONObj out = s->getObject( "x" );
- ASSERT_EQUALS( Array, out.firstElement().type() );
-
- s->setObject( "x", o, true );
- out = s->getObject( "x" );
- ASSERT_EQUALS( Array, out.firstElement().type() );
- }
-
- // symbol
- {
- // test mutable object with symbol type
- BSONObjBuilder builder;
- builder.appendSymbol("sym", "value");
- BSONObj in = builder.done();
- s->setObject( "x", in, false );
- BSONObj out = s->getObject( "x" );
- ASSERT_EQUALS( Symbol, out.firstElement().type() );
-
- // readonly
- s->setObject( "x", in, true );
- out = s->getObject( "x" );
- ASSERT_EQUALS( Symbol, out.firstElement().type() );
- }
-
- delete s;
- }
- };
-
- class SpecialDBTypes {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
-
- BSONObjBuilder b;
- b.appendTimestamp( "a" , 123456789 );
- b.appendMinKey( "b" );
- b.appendMaxKey( "c" );
- b.appendTimestamp( "d" , 1234000 , 9876 );
-
-
- {
- BSONObj t = b.done();
- ASSERT_EQUALS( 1234000U , t["d"].timestampTime() );
- ASSERT_EQUALS( 9876U , t["d"].timestampInc() );
- }
-
- s->setObject( "z" , b.obj() );
-
- ASSERT( s->invoke( "y = { a : z.a , b : z.b , c : z.c , d: z.d }" , 0, 0 ) == 0 );
-
- BSONObj out = s->getObject( "y" );
- ASSERT_EQUALS( Timestamp , out["a"].type() );
- ASSERT_EQUALS( MinKey , out["b"].type() );
- ASSERT_EQUALS( MaxKey , out["c"].type() );
- ASSERT_EQUALS( Timestamp , out["d"].type() );
-
- ASSERT_EQUALS( 9876U , out["d"].timestampInc() );
- ASSERT_EQUALS( 1234000U , out["d"].timestampTime() );
- ASSERT_EQUALS( 123456789U , out["a"].date() );
-
- delete s;
- }
- };
-
- class TypeConservation {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
-
- // -- A --
-
+ }
+};
+
+class SetImplicit {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
+
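+        // setObject() with a dotted path only overwrites an existing nested field;
+        // it never creates missing parents, as the three cases below check.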
+ BSONObj o = BSON("foo"
+ << "bar");
+ s->setObject("a.b", o);
+ ASSERT(s->getObject("a").isEmpty());
+
+ BSONObj o2 = BSONObj();
+ s->setObject("a", o2);
+ s->setObject("a.b", o);
+ ASSERT(s->getObject("a").isEmpty());
+
+ o2 = fromjson("{b:{}}");
+ s->setObject("a", o2);
+ s->setObject("a.b", o);
+ ASSERT(!s->getObject("a").isEmpty());
+ }
+};
+
+class ObjectModReadonlyTests {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
+
+ BSONObj o = BSON("x" << 17 << "y"
+ << "eliot"
+ << "z"
+ << "sara"
+ << "zz" << BSONObj());
+ s->setObject("blah", o, true);
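+        // passing 'true' marks the object read-only: none of the JS-side writes,
+        // added fields, or deletes attempted below may show up in getObject()'s output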
+
+ s->invoke("blah.y = 'e'", 0, 0);
+ BSONObj out = s->getObject("blah");
+ ASSERT(strlen(out["y"].valuestr()) > 1);
+
+ s->invoke("blah.a = 19;", 0, 0);
+ out = s->getObject("blah");
+ ASSERT(out["a"].eoo());
+
+ s->invoke("blah.zz.a = 19;", 0, 0);
+ out = s->getObject("blah");
+ ASSERT(out["zz"].embeddedObject()["a"].eoo());
+
+ s->setObject("blah.zz", BSON("a" << 19));
+ out = s->getObject("blah");
+ ASSERT(out["zz"].embeddedObject()["a"].eoo());
+
+ s->invoke("delete blah['x']", 0, 0);
+ out = s->getObject("blah");
+ ASSERT(!out["x"].eoo());
+
+ // read-only object itself can be overwritten
+ s->invoke("blah = {}", 0, 0);
+ out = s->getObject("blah");
+ ASSERT(out.isEmpty());
+
+ // test array - can't implement this in v8
+ // o = fromjson( "{a:[1,2,3]}" );
+ // s->setObject( "blah", o, true );
+ // out = s->getObject( "blah" );
+ // s->invoke( "blah.a[ 0 ] = 4;", BSONObj() );
+ // s->invoke( "delete blah['a'][ 2 ];", BSONObj() );
+ // out = s->getObject( "blah" );
+ // ASSERT_EQUALS( 1.0, out[ "a" ].embeddedObject()[ 0 ].number() );
+ // ASSERT_EQUALS( 3.0, out[ "a" ].embeddedObject()[ 2 ].number() );
+
+ delete s;
+ }
+};
+
+class OtherJSTypes {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
+
+ {
+ // date
BSONObj o;
{
- BSONObjBuilder b ;
- b.append( "a" , (int)5 );
- b.append( "b" , 5.6 );
+ BSONObjBuilder b;
+ b.appendDate("d", 123456789);
o = b.obj();
}
- ASSERT_EQUALS( NumberInt , o["a"].type() );
- ASSERT_EQUALS( NumberDouble , o["b"].type() );
+ s->setObject("x", o);
- s->setObject( "z" , o );
- s->invoke( "return z" , 0, 0 );
- BSONObj out = s->getObject( "__returnValue" );
- ASSERT_EQUALS( 5 , out["a"].number() );
- ASSERT_EQUALS( 5.6 , out["b"].number() );
+ s->invoke("return x.d.getTime() != 12;", 0, 0);
+ ASSERT_EQUALS(true, s->getBoolean("__returnValue"));
- ASSERT_EQUALS( NumberDouble , out["b"].type() );
- ASSERT_EQUALS( NumberInt , out["a"].type() );
+ s->invoke("z = x.d.getTime();", 0, 0);
+ ASSERT_EQUALS(123456789, s->getNumber("z"));
- // -- B --
+ s->invoke("z = { z : x.d }", 0, 0);
+ BSONObj out = s->getObject("z");
+ ASSERT(out["z"].type() == Date);
+ }
+ {
+ // regex
+ BSONObj o;
{
- BSONObjBuilder b ;
- b.append( "a" , (int)5 );
- b.append( "b" , 5.6 );
+ BSONObjBuilder b;
+ b.appendRegex("r", "^a", "i");
o = b.obj();
}
+ s->setObject("x", o);
+
+ s->invoke("z = x.r.test( 'b' );", 0, 0);
+ ASSERT_EQUALS(false, s->getBoolean("z"));
+
+ s->invoke("z = x.r.test( 'a' );", 0, 0);
+ ASSERT_EQUALS(true, s->getBoolean("z"));
+
+ s->invoke("z = x.r.test( 'ba' );", 0, 0);
+ ASSERT_EQUALS(false, s->getBoolean("z"));
+
+ s->invoke("z = { a : x.r };", 0, 0);
+
+ BSONObj out = s->getObject("z");
+ ASSERT_EQUALS((string) "^a", out["a"].regex());
+ ASSERT_EQUALS((string) "i", out["a"].regexFlags());
+
+ // This regex used to cause a segfault because x isn't a valid flag for a js RegExp.
+ // Now it throws a JS exception.
+ BSONObj invalidRegex = BSON_ARRAY(BSON("regex" << BSONRegEx("asdf", "x")));
+ const char* code =
+ "function (obj) {"
+ " var threw = false;"
+ " try {"
+ " obj.regex;" // should throw
+ " } catch(e) {"
+ " threw = true;"
+ " }"
+ " assert(threw);"
+ "}";
+ ASSERT_EQUALS(s->invoke(code, &invalidRegex, NULL), 0);
+ }
- s->setObject( "z" , o , false );
- s->invoke( "return z" , 0, 0 );
- out = s->getObject( "__returnValue" );
- ASSERT_EQUALS( 5 , out["a"].number() );
- ASSERT_EQUALS( 5.6 , out["b"].number() );
+ // array
+ {
+ BSONObj o = fromjson("{r:[1,2,3]}");
+ s->setObject("x", o, false);
+ BSONObj out = s->getObject("x");
+ ASSERT_EQUALS(Array, out.firstElement().type());
- ASSERT_EQUALS( NumberDouble , out["b"].type() );
- ASSERT_EQUALS( NumberInt , out["a"].type() );
+ s->setObject("x", o, true);
+ out = s->getObject("x");
+ ASSERT_EQUALS(Array, out.firstElement().type());
+ }
+ // symbol
+ {
+ // test mutable object with symbol type
+ BSONObjBuilder builder;
+ builder.appendSymbol("sym", "value");
+ BSONObj in = builder.done();
+ s->setObject("x", in, false);
+ BSONObj out = s->getObject("x");
+ ASSERT_EQUALS(Symbol, out.firstElement().type());
+
+ // readonly
+ s->setObject("x", in, true);
+ out = s->getObject("x");
+ ASSERT_EQUALS(Symbol, out.firstElement().type());
+ }
- // -- C --
+ delete s;
+ }
+};
- {
- BSONObjBuilder b ;
+class SpecialDBTypes {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
- {
- BSONObjBuilder c;
- c.append( "0" , 5.5 );
- c.append( "1" , 6 );
- b.appendArray( "a" , c.obj() );
- }
+ BSONObjBuilder b;
+ b.appendTimestamp("a", 123456789);
+ b.appendMinKey("b");
+ b.appendMaxKey("c");
+ b.appendTimestamp("d", 1234000, 9876);
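+        // Timestamp, MinKey and MaxKey are DB-internal types; the point here is that
+        // they cross into JS and back without degrading into ordinary values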
- o = b.obj();
- }
- ASSERT_EQUALS( NumberDouble , o["a"].embeddedObjectUserCheck()["0"].type() );
- ASSERT_EQUALS( NumberInt , o["a"].embeddedObjectUserCheck()["1"].type() );
+ {
+ BSONObj t = b.done();
+ ASSERT_EQUALS(1234000U, t["d"].timestampTime());
+ ASSERT_EQUALS(9876U, t["d"].timestampInc());
+ }
- s->setObject( "z" , o , false );
- out = s->getObject( "z" );
+ s->setObject("z", b.obj());
- ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
- ASSERT_EQUALS( NumberInt , out["a"].embeddedObjectUserCheck()["1"].type() );
+ ASSERT(s->invoke("y = { a : z.a , b : z.b , c : z.c , d: z.d }", 0, 0) == 0);
- s->invokeSafe( "z.z = 5;" , 0, 0 );
- out = s->getObject( "z" );
- ASSERT_EQUALS( 5 , out["z"].number() );
- ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
- // Commenting so that v8 tests will work
-// ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type() ); // TODO: this is technically bad, but here to make sure that i understand the behavior
+ BSONObj out = s->getObject("y");
+ ASSERT_EQUALS(Timestamp, out["a"].type());
+ ASSERT_EQUALS(MinKey, out["b"].type());
+ ASSERT_EQUALS(MaxKey, out["c"].type());
+ ASSERT_EQUALS(Timestamp, out["d"].type());
+ ASSERT_EQUALS(9876U, out["d"].timestampInc());
+ ASSERT_EQUALS(1234000U, out["d"].timestampTime());
+ ASSERT_EQUALS(123456789U, out["a"].date());
- // Eliot says I don't have to worry about this case
+ delete s;
+ }
+};
-// // -- D --
-//
-// o = fromjson( "{a:3.0,b:4.5}" );
-// ASSERT_EQUALS( NumberDouble , o["a"].type() );
-// ASSERT_EQUALS( NumberDouble , o["b"].type() );
-//
-// s->setObject( "z" , o , false );
-// s->invoke( "return z" , BSONObj() );
-// out = s->getObject( "__returnValue" );
-// ASSERT_EQUALS( 3 , out["a"].number() );
-// ASSERT_EQUALS( 4.5 , out["b"].number() );
-//
-// ASSERT_EQUALS( NumberDouble , out["b"].type() );
-// ASSERT_EQUALS( NumberDouble , out["a"].type() );
-//
+class TypeConservation {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
- delete s;
- }
+ // -- A --
- };
-
- class NumberLong {
- public:
- void run() {
- auto_ptr<Scope> s( globalScriptEngine->newScope() );
+ BSONObj o;
+ {
BSONObjBuilder b;
- long long val = (long long)( 0xbabadeadbeefbaddULL );
- b.append( "a", val );
- BSONObj in = b.obj();
- s->setObject( "a", in );
- BSONObj out = s->getObject( "a" );
- ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
-
- ASSERT( s->exec( "b = {b:a.a}", "foo", false, true, false ) );
- out = s->getObject( "b" );
- ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
- if( val != out.firstElement().numberLong() ) {
- cout << val << endl;
- cout << out.firstElement().numberLong() << endl;
- cout << out.toString() << endl;
- ASSERT_EQUALS( val, out.firstElement().numberLong() );
- }
-
- ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
- out = s->getObject( "c" );
- stringstream ss;
- ss << "NumberLong(\"" << val << "\")";
- ASSERT_EQUALS( ss.str(), out.firstElement().valuestr() );
-
- ASSERT( s->exec( "d = {d:a.a.toNumber()}", "foo", false, true, false ) );
- out = s->getObject( "d" );
- ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
- ASSERT_EQUALS( double( val ), out.firstElement().number() );
-
- ASSERT( s->exec( "e = {e:a.a.floatApprox}", "foo", false, true, false ) );
- out = s->getObject( "e" );
- ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
- ASSERT_EQUALS( double( val ), out.firstElement().number() );
-
- ASSERT( s->exec( "f = {f:a.a.top}", "foo", false, true, false ) );
- out = s->getObject( "f" );
- ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
-
- s->setObject( "z", BSON( "z" << (long long)( 4 ) ) );
- ASSERT( s->exec( "y = {y:z.z.top}", "foo", false, true, false ) );
- out = s->getObject( "y" );
- ASSERT_EQUALS( Undefined, out.firstElement().type() );
-
- ASSERT( s->exec( "x = {x:z.z.floatApprox}", "foo", false, true, false ) );
- out = s->getObject( "x" );
- ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
- ASSERT_EQUALS( double( 4 ), out.firstElement().number() );
-
- ASSERT( s->exec( "w = {w:z.z}", "foo", false, true, false ) );
- out = s->getObject( "w" );
- ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
- ASSERT_EQUALS( 4, out.firstElement().numberLong() );
-
+ b.append("a", (int)5);
+ b.append("b", 5.6);
+ o = b.obj();
}
- };
+ ASSERT_EQUALS(NumberInt, o["a"].type());
+ ASSERT_EQUALS(NumberDouble, o["b"].type());
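+        // JavaScript has only one number type, so the engine must remember whether each
+        // field started as NumberInt or NumberDouble to round-trip it faithfully below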
- class NumberLong2 {
- public:
- void run() {
- auto_ptr<Scope> s( globalScriptEngine->newScope() );
-
- BSONObj in;
- {
- BSONObjBuilder b;
- b.append( "a" , 5 );
- b.append( "b" , (long long)5 );
- b.append( "c" , (long long)pow( 2.0, 29 ) );
- b.append( "d" , (long long)pow( 2.0, 30 ) );
- b.append( "e" , (long long)pow( 2.0, 31 ) );
- b.append( "f" , (long long)pow( 2.0, 45 ) );
- in = b.obj();
- }
- s->setObject( "a" , in );
-
- ASSERT( s->exec( "x = tojson( a ); " ,"foo" , false , true , false ) );
- string outString = s->getString( "x" );
+ s->setObject("z", o);
+ s->invoke("return z", 0, 0);
+ BSONObj out = s->getObject("__returnValue");
+ ASSERT_EQUALS(5, out["a"].number());
+ ASSERT_EQUALS(5.6, out["b"].number());
- ASSERT( s->exec( (string)"y = " + outString , "foo2" , false , true , false ) );
- BSONObj out = s->getObject( "y" );
- ASSERT_EQUALS( in , out );
- }
- };
+ ASSERT_EQUALS(NumberDouble, out["b"].type());
+ ASSERT_EQUALS(NumberInt, out["a"].type());
- class NumberLongUnderLimit {
- public:
- void run() {
- auto_ptr<Scope> s( globalScriptEngine->newScope() );
+ // -- B --
+ {
BSONObjBuilder b;
- // limit is 2^53
- long long val = (long long)( 9007199254740991ULL );
- b.append( "a", val );
- BSONObj in = b.obj();
- s->setObject( "a", in );
- BSONObj out = s->getObject( "a" );
- ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
-
- ASSERT( s->exec( "b = {b:a.a}", "foo", false, true, false ) );
- out = s->getObject( "b" );
- ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
- if( val != out.firstElement().numberLong() ) {
- cout << val << endl;
- cout << out.firstElement().numberLong() << endl;
- cout << out.toString() << endl;
- ASSERT_EQUALS( val, out.firstElement().numberLong() );
- }
-
- ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
- out = s->getObject( "c" );
- stringstream ss;
- ss << "NumberLong(\"" << val << "\")";
- ASSERT_EQUALS( ss.str(), out.firstElement().valuestr() );
-
- ASSERT( s->exec( "d = {d:a.a.toNumber()}", "foo", false, true, false ) );
- out = s->getObject( "d" );
- ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
- ASSERT_EQUALS( double( val ), out.firstElement().number() );
-
- ASSERT( s->exec( "e = {e:a.a.floatApprox}", "foo", false, true, false ) );
- out = s->getObject( "e" );
- ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
- ASSERT_EQUALS( double( val ), out.firstElement().number() );
-
- ASSERT( s->exec( "f = {f:a.a.top}", "foo", false, true, false ) );
- out = s->getObject( "f" );
- ASSERT( Undefined == out.firstElement().type() );
+ b.append("a", (int)5);
+ b.append("b", 5.6);
+ o = b.obj();
}
- };
- class InvalidTimestamp {
- public:
- void run() {
- auto_ptr<Scope> s( globalScriptEngine->newScope() );
+ s->setObject("z", o, false);
+ s->invoke("return z", 0, 0);
+ out = s->getObject("__returnValue");
+ ASSERT_EQUALS(5, out["a"].number());
+ ASSERT_EQUALS(5.6, out["b"].number());
- // Timestamp 't' component cannot exceed max for int32_t.
- // Use appendTimestamp(field, Date) to bypass OpTime construction.
- BSONObj in;
- {
- BSONObjBuilder b;
- b.appendTimestamp( "a", std::numeric_limits<unsigned long long>::max() );
- in = b.obj();
- }
- s->setObject( "a" , in );
+ ASSERT_EQUALS(NumberDouble, out["b"].type());
+ ASSERT_EQUALS(NumberInt, out["a"].type());
- ASSERT_FALSE( s->exec( "x = tojson( a ); " ,"foo" , false , true , false ) );
- }
- };
- class WeirdObjects {
- public:
+ // -- C --
- BSONObj build( int depth ) {
+ {
BSONObjBuilder b;
- b.append( "0" , depth );
- if ( depth > 0 )
- b.appendArray( "1" , build( depth - 1 ) );
- return b.obj();
- }
- void run() {
- Scope * s = globalScriptEngine->newScope();
-
- for ( int i=5; i<100 ; i += 10 ) {
- s->setObject( "a" , build(i) , false );
- s->invokeSafe( "tojson( a )" , 0, 0 );
-
- s->setObject( "a" , build(5) , true );
- s->invokeSafe( "tojson( a )" , 0, 0 );
+ {
+ BSONObjBuilder c;
+ c.append("0", 5.5);
+ c.append("1", 6);
+ b.appendArray("a", c.obj());
}
- delete s;
+ o = b.obj();
}
- };
-
- /**
- * Test exec() timeout value terminates execution (SERVER-8053)
- */
- class ExecTimeout {
- public:
- void run() {
- scoped_ptr<Scope> scope(globalScriptEngine->newScope());
- // assert timeout occurred
- ASSERT(!scope->exec("var a = 1; while (true) { ; }",
- "ExecTimeout", false, true, false, 1));
+ ASSERT_EQUALS(NumberDouble, o["a"].embeddedObjectUserCheck()["0"].type());
+ ASSERT_EQUALS(NumberInt, o["a"].embeddedObjectUserCheck()["1"].type());
+
+ s->setObject("z", o, false);
+ out = s->getObject("z");
+
+ ASSERT_EQUALS(NumberDouble, out["a"].embeddedObjectUserCheck()["0"].type());
+ ASSERT_EQUALS(NumberInt, out["a"].embeddedObjectUserCheck()["1"].type());
+
+ s->invokeSafe("z.z = 5;", 0, 0);
+ out = s->getObject("z");
+ ASSERT_EQUALS(5, out["z"].number());
+ ASSERT_EQUALS(NumberDouble, out["a"].embeddedObjectUserCheck()["0"].type());
+        // Commented out so that the v8 tests will pass:
+        // ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type() );
+        // TODO: this is technically bad, but here to make sure that I understand the behavior
+
+ // Eliot says I don't have to worry about this case
+
+ // // -- D --
+ //
+ // o = fromjson( "{a:3.0,b:4.5}" );
+ // ASSERT_EQUALS( NumberDouble , o["a"].type() );
+ // ASSERT_EQUALS( NumberDouble , o["b"].type() );
+ //
+ // s->setObject( "z" , o , false );
+ // s->invoke( "return z" , BSONObj() );
+ // out = s->getObject( "__returnValue" );
+ // ASSERT_EQUALS( 3 , out["a"].number() );
+ // ASSERT_EQUALS( 4.5 , out["b"].number() );
+ //
+ // ASSERT_EQUALS( NumberDouble , out["b"].type() );
+ // ASSERT_EQUALS( NumberDouble , out["a"].type() );
+ //
+
+ delete s;
+ }
+};
+
+class NumberLong {
+public:
+ void run() {
+ auto_ptr<Scope> s(globalScriptEngine->newScope());
+ BSONObjBuilder b;
+ long long val = (long long)(0xbabadeadbeefbaddULL);
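+        // all 64 bits of this value are significant -- well past the 53 bits a double
+        // holds exactly -- so it must survive as a true NumberLong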
+ b.append("a", val);
+ BSONObj in = b.obj();
+ s->setObject("a", in);
+ BSONObj out = s->getObject("a");
+ ASSERT_EQUALS(mongo::NumberLong, out.firstElement().type());
+
+ ASSERT(s->exec("b = {b:a.a}", "foo", false, true, false));
+ out = s->getObject("b");
+ ASSERT_EQUALS(mongo::NumberLong, out.firstElement().type());
+ if (val != out.firstElement().numberLong()) {
+ cout << val << endl;
+ cout << out.firstElement().numberLong() << endl;
+ cout << out.toString() << endl;
+ ASSERT_EQUALS(val, out.firstElement().numberLong());
}
- };
- /**
- * Test exec() timeout value terminates execution (SERVER-8053)
- */
- class ExecNoTimeout {
- public:
- void run() {
- scoped_ptr<Scope> scope(globalScriptEngine->newScope());
-
- // assert no timeout occurred
- ASSERT(scope->exec("var a = function() { return 1; }",
- "ExecNoTimeout", false, true, false, 5 * 60 * 1000));
- }
- };
-
- /**
- * Test invoke() timeout value terminates execution (SERVER-8053)
- */
- class InvokeTimeout {
- public:
- void run() {
- scoped_ptr<Scope> scope(globalScriptEngine->newScope());
-
- // scope timeout after 500ms
- bool caught = false;
- try {
- scope->invokeSafe("function() { "
- " while (true) { } "
- "} ",
- 0, 0, 1);
- } catch (const DBException&) {
- caught = true;
- }
- ASSERT(caught);
+ ASSERT(s->exec("c = {c:a.a.toString()}", "foo", false, true, false));
+ out = s->getObject("c");
+ stringstream ss;
+ ss << "NumberLong(\"" << val << "\")";
+ ASSERT_EQUALS(ss.str(), out.firstElement().valuestr());
+
+ ASSERT(s->exec("d = {d:a.a.toNumber()}", "foo", false, true, false));
+ out = s->getObject("d");
+ ASSERT_EQUALS(NumberDouble, out.firstElement().type());
+ ASSERT_EQUALS(double(val), out.firstElement().number());
+
+ ASSERT(s->exec("e = {e:a.a.floatApprox}", "foo", false, true, false));
+ out = s->getObject("e");
+ ASSERT_EQUALS(NumberDouble, out.firstElement().type());
+ ASSERT_EQUALS(double(val), out.firstElement().number());
+
+ ASSERT(s->exec("f = {f:a.a.top}", "foo", false, true, false));
+ out = s->getObject("f");
+ ASSERT(NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type());
+
+ s->setObject("z", BSON("z" << (long long)(4)));
+ ASSERT(s->exec("y = {y:z.z.top}", "foo", false, true, false));
+ out = s->getObject("y");
+ ASSERT_EQUALS(Undefined, out.firstElement().type());
+
+ ASSERT(s->exec("x = {x:z.z.floatApprox}", "foo", false, true, false));
+ out = s->getObject("x");
+ ASSERT(NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type());
+ ASSERT_EQUALS(double(4), out.firstElement().number());
+
+ ASSERT(s->exec("w = {w:z.z}", "foo", false, true, false));
+ out = s->getObject("w");
+ ASSERT_EQUALS(mongo::NumberLong, out.firstElement().type());
+ ASSERT_EQUALS(4, out.firstElement().numberLong());
+ }
+};
+
+class NumberLong2 {
+public:
+ void run() {
+ auto_ptr<Scope> s(globalScriptEngine->newScope());
+
+ BSONObj in;
+ {
+ BSONObjBuilder b;
+ b.append("a", 5);
+ b.append("b", (long long)5);
+ b.append("c", (long long)pow(2.0, 29));
+ b.append("d", (long long)pow(2.0, 30));
+ b.append("e", (long long)pow(2.0, 31));
+ b.append("f", (long long)pow(2.0, 45));
+ in = b.obj();
}
- };
-
- /**
- * Test invoke() timeout value does not terminate execution (SERVER-8053)
- */
- class InvokeNoTimeout {
- public:
- void run() {
- scoped_ptr<Scope> scope(globalScriptEngine->newScope());
-
- // invoke completes before timeout
- scope->invokeSafe("function() { "
- " for (var i=0; i<1; i++) { ; } "
- "} ",
- 0, 0, 5 * 60 * 1000);
+ s->setObject("a", in);
+
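+        // tojson() output, eval'ed back in, must yield an equivalent object across the
+        // int/long boundary values (2^29 through 2^45)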
+ ASSERT(s->exec("x = tojson( a ); ", "foo", false, true, false));
+ string outString = s->getString("x");
+
+ ASSERT(s->exec((string) "y = " + outString, "foo2", false, true, false));
+ BSONObj out = s->getObject("y");
+ ASSERT_EQUALS(in, out);
+ }
+};
+
+class NumberLongUnderLimit {
+public:
+ void run() {
+ auto_ptr<Scope> s(globalScriptEngine->newScope());
+
+ BSONObjBuilder b;
+ // limit is 2^53
+ long long val = (long long)(9007199254740991ULL);
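+        // 9007199254740991 is 2^53 - 1; integers up to 2^53 fit in a double without
+        // losing precision, so no 'top' component is needed ('f' comes back Undefined)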
+ b.append("a", val);
+ BSONObj in = b.obj();
+ s->setObject("a", in);
+ BSONObj out = s->getObject("a");
+ ASSERT_EQUALS(mongo::NumberLong, out.firstElement().type());
+
+ ASSERT(s->exec("b = {b:a.a}", "foo", false, true, false));
+ out = s->getObject("b");
+ ASSERT_EQUALS(mongo::NumberLong, out.firstElement().type());
+ if (val != out.firstElement().numberLong()) {
+ cout << val << endl;
+ cout << out.firstElement().numberLong() << endl;
+ cout << out.toString() << endl;
+ ASSERT_EQUALS(val, out.firstElement().numberLong());
}
- };
-
- class Utf8Check {
- public:
- Utf8Check() { reset(); }
- ~Utf8Check() { reset(); }
- void run() {
- if( !globalScriptEngine->utf8Ok() ) {
- mongo::unittest::log() << "warning: utf8 not supported" << endl;
- return;
- }
- string utf8ObjSpec = "{'_id':'\\u0001\\u007f\\u07ff\\uffff'}";
- BSONObj utf8Obj = fromjson( utf8ObjSpec );
-
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- client.insert( ns(), utf8Obj );
- client.eval( "unittest", "v = db.jstests.utf8check.findOne(); db.jstests.utf8check.remove( {} ); db.jstests.utf8check.insert( v );" );
- check( utf8Obj, client.findOne( ns(), BSONObj() ) );
+ ASSERT(s->exec("c = {c:a.a.toString()}", "foo", false, true, false));
+ out = s->getObject("c");
+ stringstream ss;
+ ss << "NumberLong(\"" << val << "\")";
+ ASSERT_EQUALS(ss.str(), out.firstElement().valuestr());
+
+ ASSERT(s->exec("d = {d:a.a.toNumber()}", "foo", false, true, false));
+ out = s->getObject("d");
+ ASSERT_EQUALS(NumberDouble, out.firstElement().type());
+ ASSERT_EQUALS(double(val), out.firstElement().number());
+
+ ASSERT(s->exec("e = {e:a.a.floatApprox}", "foo", false, true, false));
+ out = s->getObject("e");
+ ASSERT_EQUALS(NumberDouble, out.firstElement().type());
+ ASSERT_EQUALS(double(val), out.firstElement().number());
+
+ ASSERT(s->exec("f = {f:a.a.top}", "foo", false, true, false));
+ out = s->getObject("f");
+ ASSERT(Undefined == out.firstElement().type());
+ }
+};
+
+class InvalidTimestamp {
+public:
+ void run() {
+ auto_ptr<Scope> s(globalScriptEngine->newScope());
+
+ // Timestamp 't' component cannot exceed max for int32_t.
+ // Use appendTimestamp(field, Date) to bypass OpTime construction.
+ BSONObj in;
+ {
+ BSONObjBuilder b;
+ b.appendTimestamp("a", std::numeric_limits<unsigned long long>::max());
+ in = b.obj();
}
- private:
- void check( const BSONObj &one, const BSONObj &two ) {
- if ( one.woCompare( two ) != 0 ) {
- static string fail = string( "Assertion failure expected " ) + one.toString() + ", got " + two.toString();
- FAIL( fail.c_str() );
- }
+ s->setObject("a", in);
+
+ ASSERT_FALSE(s->exec("x = tojson( a ); ", "foo", false, true, false));
+ }
+};
+
+class WeirdObjects {
+public:
+ BSONObj build(int depth) {
+ BSONObjBuilder b;
+ b.append("0", depth);
+ if (depth > 0)
+ b.appendArray("1", build(depth - 1));
+ return b.obj();
+ }
+
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
+
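+        // round-trip progressively deeper nestings (up to ~95 levels) through tojson()
+        // in both mutable and read-only modes; success here just means no crash/assert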
+ for (int i = 5; i < 100; i += 10) {
+ s->setObject("a", build(i), false);
+ s->invokeSafe("tojson( a )", 0, 0);
+
+ s->setObject("a", build(5), true);
+ s->invokeSafe("tojson( a )", 0, 0);
}
- void reset() {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
+ delete s;
+ }
+};
- client.dropCollection( ns() );
- }
-
- static const char *ns() { return "unittest.jstests.utf8check"; }
- };
+/**
+ * Test exec() timeout value terminates execution (SERVER-8053)
+ */
+class ExecTimeout {
+public:
+ void run() {
+ scoped_ptr<Scope> scope(globalScriptEngine->newScope());
- class LongUtf8String {
- public:
- LongUtf8String() { reset(); }
- ~LongUtf8String() { reset(); }
- void run() {
- if( !globalScriptEngine->utf8Ok() )
- return;
+ // assert timeout occurred
+ ASSERT(!scope->exec("var a = 1; while (true) { ; }", "ExecTimeout", false, true, false, 1));
+ }
+};
- OperationContextImpl txn;
- DBDirectClient client(&txn);
+/**
+ * Test exec() timeout value does not terminate execution (SERVER-8053)
+ */
+class ExecNoTimeout {
+public:
+ void run() {
+ scoped_ptr<Scope> scope(globalScriptEngine->newScope());
+
+ // assert no timeout occurred
+ ASSERT(scope->exec("var a = function() { return 1; }",
+ "ExecNoTimeout",
+ false,
+ true,
+ false,
+ 5 * 60 * 1000));
+ }
+};
- client.eval( "unittest", "db.jstests.longutf8string.save( {_id:'\\uffff\\uffff\\uffff\\uffff'} )" );
+/**
+ * Test invoke() timeout value terminates execution (SERVER-8053)
+ */
+class InvokeTimeout {
+public:
+ void run() {
+ scoped_ptr<Scope> scope(globalScriptEngine->newScope());
+
+        // invoke with a 1 ms timeout so the infinite loop gets interrupted
+ bool caught = false;
+ try {
+ scope->invokeSafe(
+ "function() { "
+ " while (true) { } "
+ "} ",
+ 0,
+ 0,
+ 1);
+ } catch (const DBException&) {
+ caught = true;
}
- private:
- void reset() {
- OperationContextImpl txn;
- DBDirectClient client(&txn);
+ ASSERT(caught);
+ }
+};
- client.dropCollection( ns() );
+/**
+ * Test invoke() timeout value does not terminate execution (SERVER-8053)
+ */
+class InvokeNoTimeout {
+public:
+ void run() {
+ scoped_ptr<Scope> scope(globalScriptEngine->newScope());
+
+ // invoke completes before timeout
+ scope->invokeSafe(
+ "function() { "
+ " for (var i=0; i<1; i++) { ; } "
+ "} ",
+ 0,
+ 0,
+ 5 * 60 * 1000);
+ }
+};
+
+class Utf8Check {
+public:
+ Utf8Check() {
+ reset();
+ }
+ ~Utf8Check() {
+ reset();
+ }
+ void run() {
+ if (!globalScriptEngine->utf8Ok()) {
+ mongo::unittest::log() << "warning: utf8 not supported" << endl;
+ return;
}
-
- static const char *ns() { return "unittest.jstests.longutf8string"; }
- };
-
- class InvalidUTF8Check {
- public:
- void run() {
- if( !globalScriptEngine->utf8Ok() )
- return;
-
- auto_ptr<Scope> s;
- s.reset( globalScriptEngine->newScope() );
-
- BSONObj b;
- {
- char crap[5];
-
- crap[0] = (char) 128;
- crap[1] = 17;
- crap[2] = (char) 128;
- crap[3] = 17;
- crap[4] = 0;
-
- BSONObjBuilder bb;
- bb.append( "x" , crap );
- b = bb.obj();
- }
-
- //cout << "ELIOT: " << b.jsonString() << endl;
- // its ok if this is handled by js, just can't create a c++ exception
- s->invoke( "x=this.x.length;" , 0, &b );
+ string utf8ObjSpec = "{'_id':'\\u0001\\u007f\\u07ff\\uffff'}";
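+        // the _id covers 1-, 2- and 3-byte UTF-8 encodings: U+0001 and U+007F take one
+        // byte, U+07FF two, and U+FFFF three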
+ BSONObj utf8Obj = fromjson(utf8ObjSpec);
+
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.insert(ns(), utf8Obj);
+ client.eval("unittest",
+ "v = db.jstests.utf8check.findOne(); db.jstests.utf8check.remove( {} ); "
+ "db.jstests.utf8check.insert( v );");
+ check(utf8Obj, client.findOne(ns(), BSONObj()));
+ }
+
+private:
+ void check(const BSONObj& one, const BSONObj& two) {
+ if (one.woCompare(two) != 0) {
+ static string fail =
+ string("Assertion failure expected ") + one.toString() + ", got " + two.toString();
+ FAIL(fail.c_str());
}
- };
-
- class CodeTests {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
-
- {
- BSONObjBuilder b;
- b.append( "a" , 1 );
- b.appendCode( "b" , "function(){ out.b = 11; }" );
- b.appendCodeWScope( "c" , "function(){ out.c = 12; }" , BSONObj() );
- b.appendCodeWScope( "d" , "function(){ out.d = 13 + bleh; }" , BSON( "bleh" << 5 ) );
- s->setObject( "foo" , b.obj() );
- }
-
- s->invokeSafe( "out = {}; out.a = foo.a; foo.b(); foo.c();" , 0, 0 );
- BSONObj out = s->getObject( "out" );
-
- ASSERT_EQUALS( 1 , out["a"].number() );
- ASSERT_EQUALS( 11 , out["b"].number() );
- ASSERT_EQUALS( 12 , out["c"].number() );
-
- // Guess we don't care about this
- //s->invokeSafe( "foo.d() " , BSONObj() );
- //out = s->getObject( "out" );
- //ASSERT_EQUALS( 18 , out["d"].number() );
-
-
- delete s;
+ }
+
+ void reset() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.dropCollection(ns());
+ }
+
+ static const char* ns() {
+ return "unittest.jstests.utf8check";
+ }
+};
+
+class LongUtf8String {
+public:
+ LongUtf8String() {
+ reset();
+ }
+ ~LongUtf8String() {
+ reset();
+ }
+ void run() {
+ if (!globalScriptEngine->utf8Ok())
+ return;
+
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.eval("unittest",
+ "db.jstests.longutf8string.save( {_id:'\\uffff\\uffff\\uffff\\uffff'} )");
+ }
+
+private:
+ void reset() {
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+
+ client.dropCollection(ns());
+ }
+
+ static const char* ns() {
+ return "unittest.jstests.longutf8string";
+ }
+};
+
+class InvalidUTF8Check {
+public:
+ void run() {
+ if (!globalScriptEngine->utf8Ok())
+ return;
+
+ auto_ptr<Scope> s;
+ s.reset(globalScriptEngine->newScope());
+
+ BSONObj b;
+ {
+ char crap[5];
+
+ crap[0] = (char)128;
+ crap[1] = 17;
+ crap[2] = (char)128;
+ crap[3] = 17;
+ crap[4] = 0;
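+            // 0x80 is a bare continuation byte and can never begin a valid
+            // UTF-8 sequence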
+
+ BSONObjBuilder bb;
+ bb.append("x", crap);
+ b = bb.obj();
}
- };
-
- namespace RoundTripTests {
-
- // Inherit from this class to test round tripping of JSON objects
- class TestRoundTrip {
- public:
- virtual ~TestRoundTrip() {}
- void run() {
- // Insert in Javascript -> Find using DBDirectClient
-
- // Drop the collection
- OperationContextImpl txn;
- DBDirectClient client(&txn);
-
- client.dropCollection( "unittest.testroundtrip" );
-
- // Insert in Javascript
- stringstream jsInsert;
- jsInsert << "db.testroundtrip.insert(" << jsonIn() << ")";
- ASSERT_TRUE( client.eval( "unittest" , jsInsert.str() ) );
-
- // Find using DBDirectClient
- BSONObj excludeIdProjection = BSON( "_id" << 0 );
- BSONObj directFind = client.findOne( "unittest.testroundtrip",
- "",
- &excludeIdProjection);
- bsonEquals( bson(), directFind );
-
-
- // Insert using DBDirectClient -> Find in Javascript
-
- // Drop the collection
- client.dropCollection( "unittest.testroundtrip" );
-
- // Insert using DBDirectClient
- client.insert( "unittest.testroundtrip" , bson() );
-
- // Find in Javascript
- stringstream jsFind;
- jsFind << "dbref = db.testroundtrip.findOne( { } , { _id : 0 } )\n"
- << "assert.eq(dbref, " << jsonOut() << ")";
- ASSERT_TRUE( client.eval( "unittest" , jsFind.str() ) );
- }
- protected:
-
- // Methods that must be defined by child classes
- virtual BSONObj bson() const = 0;
- virtual string json() const = 0;
-
- // This can be overriden if a different meaning of equality besides woCompare is needed
- virtual void bsonEquals( const BSONObj &expected, const BSONObj &actual ) {
- if ( expected.woCompare( actual ) ) {
- ::mongo::log() << "want:" << expected.jsonString()
- << " size: " << expected.objsize() << endl;
- ::mongo::log() << "got :" << actual.jsonString()
- << " size: " << actual.objsize() << endl;
- ::mongo::log() << expected.hexDump() << endl;
- ::mongo::log() << actual.hexDump() << endl;
- }
- ASSERT( !expected.woCompare( actual ) );
- }
-
- // This can be overriden if the JSON representation is altered on the round trip
- virtual string jsonIn() const {
- return json();
- }
- virtual string jsonOut() const {
- return json();
- }
- };
-
- class DBRefTest : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- OID o;
- memset( &o, 0, 12 );
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", o);
- subBuilder.done();
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : DBRef( \"ns\", ObjectId( \"000000000000000000000000\" ) ) }";
- }
-
- // A "fetch" function is added to the DBRef object when it is inserted using the
- // constructor, so we need to compare the fields individually
- virtual void bsonEquals( const BSONObj &expected, const BSONObj &actual ) {
- ASSERT_EQUALS( expected["a"].type() , actual["a"].type() );
- ASSERT_EQUALS( expected["a"]["$id"].OID() , actual["a"]["$id"].OID() );
- ASSERT_EQUALS( expected["a"]["$ref"].String() , actual["a"]["$ref"].String() );
- }
- };
-
- class DBPointerTest : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- OID o;
- memset( &o, 0, 12 );
- b.appendDBRef( "a" , "ns" , o );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : DBPointer( \"ns\", ObjectId( \"000000000000000000000000\" ) ) }";
- }
- };
-
- class InformalDBRefTest : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", "000000000000000000000000");
- subBuilder.done();
- return b.obj();
- }
-
- // Don't need to return anything because we are overriding both jsonOut and jsonIn
- virtual string json() const { return ""; }
-
- // Need to override these because the JSON doesn't actually round trip.
- // An object with "$ref" and "$id" fields is handled specially and different on the way out.
- virtual string jsonOut() const {
- return "{ \"a\" : DBRef( \"ns\", \"000000000000000000000000\" ) }";
- }
- virtual string jsonIn() const {
- stringstream ss;
- ss << "{ \"a\" : { \"$ref\" : \"ns\" , " <<
- "\"$id\" : \"000000000000000000000000\" } }";
- return ss.str();
- }
- };
-
- class InformalDBRefOIDTest : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- OID o;
- memset( &o, 0, 12 );
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", o);
- subBuilder.done();
- return b.obj();
- }
-
- // Don't need to return anything because we are overriding both jsonOut and jsonIn
- virtual string json() const { return ""; }
-
- // Need to override these because the JSON doesn't actually round trip.
- // An object with "$ref" and "$id" fields is handled specially and different on the way out.
- virtual string jsonOut() const {
- return "{ \"a\" : DBRef( \"ns\", ObjectId( \"000000000000000000000000\" ) ) }";
- }
- virtual string jsonIn() const {
- stringstream ss;
- ss << "{ \"a\" : { \"$ref\" : \"ns\" , " <<
- "\"$id\" : ObjectId( \"000000000000000000000000\" ) } }";
- return ss.str();
- }
- };
-
- class InformalDBRefExtraFieldTest : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- OID o;
- memset( &o, 0, 12 );
- BSONObjBuilder subBuilder(b.subobjStart("a"));
- subBuilder.append("$ref", "ns");
- subBuilder.append("$id", o);
- subBuilder.append("otherfield", "value");
- subBuilder.done();
- return b.obj();
- }
-
- // Don't need to return anything because we are overriding both jsonOut and jsonIn
- virtual string json() const { return ""; }
-
- // Need to override these because the JSON doesn't actually round trip.
- // An object with "$ref" and "$id" fields is handled specially and different on the way out.
- virtual string jsonOut() const {
- return "{ \"a\" : DBRef( \"ns\", ObjectId( \"000000000000000000000000\" ) ) }";
- }
- virtual string jsonIn() const {
- stringstream ss;
- ss << "{ \"a\" : { \"$ref\" : \"ns\" , " <<
- "\"$id\" : ObjectId( \"000000000000000000000000\" ) , " <<
- "\"otherfield\" : \"value\" } }";
- return ss.str();
- }
- };
-
- class Empty : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- return b.obj();
- }
- virtual string json() const {
- return "{}";
- }
- };
-
- class EmptyWithSpace : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- return b.obj();
- }
- virtual string json() const {
- return "{ }";
- }
- };
-
- class SingleString : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "b" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"b\" }";
- }
- };
-
- class EmptyStrings : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "", "" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"\" : \"\" }";
- }
- };
-
- class SingleNumber : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : 1 }";
- }
- };
-
- class RealNumber : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- double d;
- ASSERT_OK(parseNumberFromString( "0.7", &d ));
- b.append( "a", d );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : 0.7 }";
- }
- };
-
- class FancyNumber : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- double d;
- ASSERT_OK(parseNumberFromString( "-4.4433e-2", &d ));
- b.append( "a", d );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : -4.4433e-2 }";
- }
- };
-
- class TwoElements : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- b.append( "b", "foo" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : 1, \"b\" : \"foo\" }";
- }
- };
-
- class Subobject : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- BSONObjBuilder c;
- c.append( "z", b.done() );
- return c.obj();
- }
- virtual string json() const {
- return "{ \"z\" : { \"a\" : 1 } }";
- }
- };
-
- class DeeplyNestedObject : public TestRoundTrip {
- virtual string buildJson(int depth) const {
- if (depth == 0) {
- return "{\"0\":true}";
- }
- else {
- std::stringstream ss;
- ss << "{\"" << depth << "\":" << buildJson(depth - 1) << "}";
- depth--;
- return ss.str();
- }
- }
- virtual BSONObj buildBson(int depth) const {
- BSONObjBuilder builder;
- if (depth == 0) {
- builder.append( "0", true );
- return builder.obj();
- }
- else {
- std::stringstream ss;
- ss << depth;
- depth--;
- builder.append(ss.str(), buildBson(depth));
- return builder.obj();
- }
- }
- virtual BSONObj bson() const {
- return buildBson(35);
- }
- virtual string json() const {
- return buildJson(35);
- }
- };
-
- class ArrayEmpty : public TestRoundTrip {
- virtual BSONObj bson() const {
- vector< int > arr;
- BSONObjBuilder b;
- b.append( "a", arr );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : [] }";
- }
- };
-
- class Array : public TestRoundTrip {
- virtual BSONObj bson() const {
- vector< int > arr;
- arr.push_back( 1 );
- arr.push_back( 2 );
- arr.push_back( 3 );
- BSONObjBuilder b;
- b.append( "a", arr );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : [ 1, 2, 3 ] }";
- }
- };
-
- class True : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendBool( "a", true );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : true }";
- }
- };
- class False : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendBool( "a", false );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : false }";
- }
- };
+ // cout << "ELIOT: " << b.jsonString() << endl;
+        // it's OK if this is handled by JS; it just can't raise a C++ exception
+ s->invoke("x=this.x.length;", 0, &b);
+ }
+};
- class Null : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNull( "a" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : null }";
- }
- };
+class CodeTests {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
- class Undefined : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendUndefined( "a" );
- return b.obj();
- }
-
- // Don't need to return anything because we are overriding both jsonOut and jsonIn
- virtual string json() const { return ""; }
-
- // undefined values come out as null in the shell. See SERVER-6102.
- virtual string jsonIn() const {
- return "{ \"a\" : undefined }";
- }
- virtual string jsonOut() const {
- return "{ \"a\" : null }";
- }
- };
-
- class EscapedCharacters : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "\" \\ / \b \f \n \r \t \v" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t \\v\" }";
- }
- };
-
- class NonEscapedCharacters : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "% { a z $ # ' " );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\% \\{ \\a \\z \\$ \\# \\' \\ \" }";
- }
- };
-
- class AllowedControlCharacter : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "\x7f" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\x7f\" }";
- }
- };
-
- class NumbersInFieldName : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "b1", "b" );
- return b.obj();
- }
- virtual string json() const {
- return "{ b1 : \"b\" }";
- }
- };
-
- class EscapeFieldName : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "\n", "b" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"\\n\" : \"b\" }";
- }
- };
-
- class EscapedUnicodeToUtf8 : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- unsigned char u[ 7 ];
- u[ 0 ] = 0xe0 | 0x0a;
- u[ 1 ] = 0x80;
- u[ 2 ] = 0x80;
- u[ 3 ] = 0xe0 | 0x0a;
- u[ 4 ] = 0x80;
- u[ 5 ] = 0x80;
- u[ 6 ] = 0;
- b.append( "a", (char *) u );
- BSONObj built = b.obj();
- ASSERT_EQUALS( string( (char *) u ), built.firstElement().valuestr() );
- return built;
- }
- virtual string json() const {
- return "{ \"a\" : \"\\ua000\\uA000\" }";
- }
- };
-
- class Utf8AllOnes : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- unsigned char u[ 8 ];
- u[ 0 ] = 0x01;
-
- u[ 1 ] = 0x7f;
-
- u[ 2 ] = 0xdf;
- u[ 3 ] = 0xbf;
-
- u[ 4 ] = 0xef;
- u[ 5 ] = 0xbf;
- u[ 6 ] = 0xbf;
-
- u[ 7 ] = 0;
-
- b.append( "a", (char *) u );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\u0001\\u007f\\u07ff\\uffff\" }";
- }
- };
-
- class Utf8FirstByteOnes : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- unsigned char u[ 6 ];
- u[ 0 ] = 0xdc;
- u[ 1 ] = 0x80;
-
- u[ 2 ] = 0xef;
- u[ 3 ] = 0xbc;
- u[ 4 ] = 0x80;
-
- u[ 5 ] = 0;
-
- b.append( "a", (char *) u );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\u0700\\uff00\" }";
- }
- };
-
- class BinData : public TestRoundTrip {
- virtual BSONObj bson() const {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, BinDataGeneral, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : BinData( 0 , \"YWJj\" ) }";
- }
- };
-
- class BinDataPaddedSingle : public TestRoundTrip {
- virtual BSONObj bson() const {
- char z[ 2 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- BSONObjBuilder b;
- b.appendBinData( "a", 2, BinDataGeneral, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : BinData( 0 , \"YWI=\" ) }";
- }
- };
-
- class BinDataPaddedDouble : public TestRoundTrip {
- virtual BSONObj bson() const {
- char z[ 1 ];
- z[ 0 ] = 'a';
- BSONObjBuilder b;
- b.appendBinData( "a", 1, BinDataGeneral, z );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : BinData( 0 , \"YQ==\" ) }";
- }
- };
-
- class BinDataAllChars : public TestRoundTrip {
- virtual BSONObj bson() const {
- unsigned char z[] = {
- 0x00, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
- 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
- 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
- 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
- 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF
- };
- BSONObjBuilder b;
- b.appendBinData( "a", 48, BinDataGeneral, z );
- return b.obj();
- }
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : BinData( 0 , \"ABCDEFGHIJKLMNOPQRSTUVWXYZ" <<
- "abcdefghijklmnopqrstuvwxyz0123456789+/\" ) }";
- return ss.str();
- }
- };
-
- class Date : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", 0 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : new Date( 0 ) }";
- }
- };
-
- class DateNonzero : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", 100 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : new Date( 100 ) }";
- }
- };
-
- class DateNegative : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", -1 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : new Date( -1 ) }";
- }
- };
-
- class Timestamp : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendTimestamp( "a", 20000ULL, 5 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : Timestamp( 20, 5 ) }";
- }
- };
-
- class TimestampMax : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendMaxForType( "a", mongo::Timestamp );
- BSONObj o = b.obj();
- return o;
- }
- virtual string json() const {
- OpTime opTime = OpTime::max();
- stringstream ss;
- ss << "{ \"a\" : Timestamp( " << opTime.getSecs() << ", " << opTime.getInc()
- << " ) }";
- return ss.str();
- }
- };
-
- class Regex : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "b", "" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : /b/ }";
- }
- };
-
- class RegexWithQuotes : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "\"", "" );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"a\" : /\"/ }";
- }
- };
-
- class UnquotedFieldName : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a_b", 1 );
- return b.obj();
- }
- virtual string json() const {
- return "{ a_b : 1 }";
- }
- };
-
- class SingleQuotes : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "ab'c\"", "bb\b '\"" );
- return b.obj();
- }
- virtual string json() const {
- return "{ 'ab\\'c\"' : 'bb\\b \\'\"' }";
- }
- };
-
- class ObjectId : public TestRoundTrip {
- virtual BSONObj bson() const {
- OID id;
- id.init( "deadbeeff00ddeadbeeff00d" );
- BSONObjBuilder b;
- b.appendOID( "foo", &id );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"foo\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
- }
- };
-
- class NumberLong : public TestRoundTrip {
- public:
- virtual BSONObj bson() const {
- return BSON( "long" << 4611686018427387904ll ); // 2**62
- }
- virtual string json() const {
- return "{ \"long\": NumberLong(4611686018427387904) }";
- }
- };
-
- class NumberInt : public TestRoundTrip {
- public:
- virtual BSONObj bson() const {
- return BSON( "int" << static_cast<int>(100) );
- }
- virtual string json() const {
- return "{ \"int\": NumberInt(100) }";
- }
- };
-
- class Number : public TestRoundTrip {
- public:
- virtual BSONObj bson() const {
- return BSON( "double" << 3.14 );
- }
- virtual string json() const {
- return "{ \"double\": Number(3.14) }";
- }
- };
-
- class UUID : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- unsigned char z[] = {
- 0xAB, 0xCD, 0xEF, 0xAB, 0xCD, 0xEF,
- 0xAB, 0xCD, 0xEF, 0xAB, 0xCD, 0xEF,
- 0x00, 0x00, 0x00, 0x00
- };
- b.appendBinData( "a" , 16 , bdtUUID , z );
- return b.obj();
- }
+ {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ b.appendCode("b", "function(){ out.b = 11; }");
+ b.appendCodeWScope("c", "function(){ out.c = 12; }", BSONObj());
+ b.appendCodeWScope("d", "function(){ out.d = 13 + bleh; }", BSON("bleh" << 5));
+ s->setObject("foo", b.obj());
+ }
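+        // 'b' is plain Code while 'c' and 'd' are CodeWScope; invoking them is
+        // expected to write into the global 'out' object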
- // Don't need to return anything because we are overriding both jsonOut and jsonIn
- virtual string json() const { return ""; }
+ s->invokeSafe("out = {}; out.a = foo.a; foo.b(); foo.c();", 0, 0);
+ BSONObj out = s->getObject("out");
- // The UUID constructor corresponds to a special BinData type
- virtual string jsonIn() const {
- return "{ \"a\" : UUID(\"abcdefabcdefabcdefabcdef00000000\") }";
- }
- virtual string jsonOut() const {
- return "{ \"a\" : BinData(3,\"q83vq83vq83vq83vAAAAAA==\") }";
- }
- };
+ ASSERT_EQUALS(1, out["a"].number());
+ ASSERT_EQUALS(11, out["b"].number());
+ ASSERT_EQUALS(12, out["c"].number());
- class HexData : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- unsigned char z[] = {
- 0xAB, 0xCD, 0xEF, 0xAB, 0xCD, 0xEF,
- 0xAB, 0xCD, 0xEF, 0xAB, 0xCD, 0xEF,
- 0x00, 0x00, 0x00, 0x00
- };
- b.appendBinData( "a" , 16 , BinDataGeneral , z );
- return b.obj();
- }
+ // Guess we don't care about this
+ // s->invokeSafe( "foo.d() " , BSONObj() );
+ // out = s->getObject( "out" );
+ // ASSERT_EQUALS( 18 , out["d"].number() );
- // Don't need to return anything because we are overriding both jsonOut and jsonIn
- virtual string json() const { return ""; }
- // The HexData constructor creates a BinData type from a hex string
- virtual string jsonIn() const {
- return "{ \"a\" : HexData(0,\"abcdefabcdefabcdefabcdef00000000\") }";
- }
- virtual string jsonOut() const {
- return "{ \"a\" : BinData(0,\"q83vq83vq83vq83vAAAAAA==\") }";
- }
- };
+ delete s;
+ }
+};
- class MD5 : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- unsigned char z[] = {
- 0xAB, 0xCD, 0xEF, 0xAB, 0xCD, 0xEF,
- 0xAB, 0xCD, 0xEF, 0xAB, 0xCD, 0xEF,
- 0x00, 0x00, 0x00, 0x00
- };
- b.appendBinData( "a" , 16 , MD5Type , z );
- return b.obj();
- }
+namespace RoundTripTests {
- // Don't need to return anything because we are overriding both jsonOut and jsonIn
- virtual string json() const { return ""; }
+// Inherit from this class to test round tripping of JSON objects
+class TestRoundTrip {
+public:
+ virtual ~TestRoundTrip() {}
+ void run() {
+        // Insert in JavaScript -> Find using DBDirectClient
- // The HexData constructor creates a BinData type from a hex string
- virtual string jsonIn() const {
- return "{ \"a\" : MD5(\"abcdefabcdefabcdefabcdef00000000\") }";
- }
- virtual string jsonOut() const {
- return "{ \"a\" : BinData(5,\"q83vq83vq83vq83vAAAAAA==\") }";
- }
- };
-
- class NullString : public TestRoundTrip {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "x" , "a\0b" , 4 );
- return b.obj();
- }
- virtual string json() const {
- return "{ \"x\" : \"a\\u0000b\" }";
- }
- };
+ // Drop the collection
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
- } // namespace RoundTripTests
+ client.dropCollection("unittest.testroundtrip");
- class BinDataType {
- public:
+        // Insert in JavaScript
+ stringstream jsInsert;
+ jsInsert << "db.testroundtrip.insert(" << jsonIn() << ")";
+ ASSERT_TRUE(client.eval("unittest", jsInsert.str()));
- void pp( const char * s , BSONElement e ) {
- int len;
- const char * data = e.binData( len );
- cout << s << ":" << e.binDataType() << "\t" << len << endl;
- cout << "\t";
- for ( int i=0; i<len; i++ )
- cout << (int)(data[i]) << " ";
- cout << endl;
- }
+ // Find using DBDirectClient
+ BSONObj excludeIdProjection = BSON("_id" << 0);
+ BSONObj directFind = client.findOne("unittest.testroundtrip", "", &excludeIdProjection);
+ bsonEquals(bson(), directFind);
- void run() {
- Scope * s = globalScriptEngine->newScope();
- const char * foo = "asdas\0asdasd";
- const char * base64 = "YXNkYXMAYXNkYXNk";
+        // Insert using DBDirectClient -> Find in JavaScript
- BSONObj in;
- {
- BSONObjBuilder b;
- b.append( "a" , 7 );
- b.appendBinData( "b" , 12 , BinDataGeneral , foo );
- in = b.obj();
- s->setObject( "x" , in );
- }
+ // Drop the collection
+ client.dropCollection("unittest.testroundtrip");
- s->invokeSafe( "myb = x.b; print( myb ); printjson( myb );" , 0, 0 );
- s->invokeSafe( "y = { c : myb };" , 0, 0 );
-
- BSONObj out = s->getObject( "y" );
- ASSERT_EQUALS( BinData , out["c"].type() );
-// pp( "in " , in["b"] );
-// pp( "out" , out["c"] );
- ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
-
- // check that BinData js class is utilized
- s->invokeSafe( "q = x.b.toString();", 0, 0 );
- stringstream expected;
- expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")";
- ASSERT_EQUALS( expected.str(), s->getString( "q" ) );
-
- stringstream scriptBuilder;
- scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64 << "\" ) };";
- string script = scriptBuilder.str();
- s->invokeSafe( script.c_str(), 0, 0 );
- out = s->getObject( "z" );
-// pp( "out" , out["c"] );
- ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
-
- s->invokeSafe( "a = { f: new BinData( 128, \"\" ) };", 0, 0 );
- out = s->getObject( "a" );
- int len = -1;
- out[ "f" ].binData( len );
- ASSERT_EQUALS( 0, len );
- ASSERT_EQUALS( 128, out[ "f" ].binDataType() );
-
- delete s;
- }
- };
+ // Insert using DBDirectClient
+ client.insert("unittest.testroundtrip", bson());
- class VarTests {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
+        // Find in JavaScript
+ stringstream jsFind;
+ jsFind << "dbref = db.testroundtrip.findOne( { } , { _id : 0 } )\n"
+ << "assert.eq(dbref, " << jsonOut() << ")";
+ ASSERT_TRUE(client.eval("unittest", jsFind.str()));
+ }
- ASSERT( s->exec( "a = 5;" , "a" , false , true , false ) );
- ASSERT_EQUALS( 5 , s->getNumber("a" ) );
+protected:
+ // Methods that must be defined by child classes
+ virtual BSONObj bson() const = 0;
+ virtual string json() const = 0;
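+    // json() should evaluate in the shell to the same document bson() builds; when
+    // the text form differs by direction, override jsonIn()/jsonOut() below instead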
- ASSERT( s->exec( "var b = 6;" , "b" , false , true , false ) );
- ASSERT_EQUALS( 6 , s->getNumber("b" ) );
- delete s;
+    // This can be overridden if a different meaning of equality besides woCompare is needed
+ virtual void bsonEquals(const BSONObj& expected, const BSONObj& actual) {
+ if (expected.woCompare(actual)) {
+ ::mongo::log() << "want:" << expected.jsonString() << " size: " << expected.objsize()
+ << endl;
+ ::mongo::log() << "got :" << actual.jsonString() << " size: " << actual.objsize()
+ << endl;
+ ::mongo::log() << expected.hexDump() << endl;
+ ::mongo::log() << actual.hexDump() << endl;
}
- };
-
- class Speed1 {
- public:
- void run() {
- BSONObj start = BSON( "x" << 5.0 );
- BSONObj empty;
-
- auto_ptr<Scope> s;
- s.reset( globalScriptEngine->newScope() );
-
- ScriptingFunction f = s->createFunction( "return this.x + 6;" );
-
- Timer t;
- double n = 0;
- for ( ; n < 10000 ; n++ ) {
- s->invoke( f , &empty, &start );
- ASSERT_EQUALS( 11 , s->getNumber( "__returnValue" ) );
- }
- //cout << "speed1: " << ( n / t.millis() ) << " ops/ms" << endl;
+ ASSERT(!expected.woCompare(actual));
+ }
+
+    // This can be overridden if the JSON representation is altered on the round trip
+ virtual string jsonIn() const {
+ return json();
+ }
+ virtual string jsonOut() const {
+ return json();
+ }
+};
+
+class DBRefTest : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset(&o, 0, 12);
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", o);
+ subBuilder.done();
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : DBRef( \"ns\", ObjectId( \"000000000000000000000000\" ) ) }";
+ }
+
+ // A "fetch" function is added to the DBRef object when it is inserted using the
+ // constructor, so we need to compare the fields individually
+ virtual void bsonEquals(const BSONObj& expected, const BSONObj& actual) {
+ ASSERT_EQUALS(expected["a"].type(), actual["a"].type());
+ ASSERT_EQUALS(expected["a"]["$id"].OID(), actual["a"]["$id"].OID());
+ ASSERT_EQUALS(expected["a"]["$ref"].String(), actual["a"]["$ref"].String());
+ }
+};
+
+class DBPointerTest : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset(&o, 0, 12);
+ b.appendDBRef("a", "ns", o);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : DBPointer( \"ns\", ObjectId( \"000000000000000000000000\" ) ) }";
+ }
+};
+
+class InformalDBRefTest : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", "000000000000000000000000");
+ subBuilder.done();
+ return b.obj();
+ }
+
+ // Don't need to return anything because we are overriding both jsonOut and jsonIn
+ virtual string json() const {
+ return "";
+ }
+
+ // Need to override these because the JSON doesn't actually round trip.
+ // An object with "$ref" and "$id" fields is handled specially and different on the way out.
+ virtual string jsonOut() const {
+ return "{ \"a\" : DBRef( \"ns\", \"000000000000000000000000\" ) }";
+ }
+ virtual string jsonIn() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$ref\" : \"ns\" , "
+ << "\"$id\" : \"000000000000000000000000\" } }";
+ return ss.str();
+ }
+};
+
+class InformalDBRefOIDTest : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset(&o, 0, 12);
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", o);
+ subBuilder.done();
+ return b.obj();
+ }
+
+ // Don't need to return anything because we are overriding both jsonOut and jsonIn
+ virtual string json() const {
+ return "";
+ }
+
+ // Need to override these because the JSON doesn't actually round trip.
+ // An object with "$ref" and "$id" fields is handled specially and different on the way out.
+ virtual string jsonOut() const {
+ return "{ \"a\" : DBRef( \"ns\", ObjectId( \"000000000000000000000000\" ) ) }";
+ }
+ virtual string jsonIn() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$ref\" : \"ns\" , "
+ << "\"$id\" : ObjectId( \"000000000000000000000000\" ) } }";
+ return ss.str();
+ }
+};
+
+class InformalDBRefExtraFieldTest : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset(&o, 0, 12);
+ BSONObjBuilder subBuilder(b.subobjStart("a"));
+ subBuilder.append("$ref", "ns");
+ subBuilder.append("$id", o);
+ subBuilder.append("otherfield", "value");
+ subBuilder.done();
+ return b.obj();
+ }
+
+ // Don't need to return anything because we are overriding both jsonOut and jsonIn
+ virtual string json() const {
+ return "";
+ }
+
+ // Need to override these because the JSON doesn't actually round trip.
+ // An object with "$ref" and "$id" fields is handled specially and different on the way out.
+ virtual string jsonOut() const {
+ return "{ \"a\" : DBRef( \"ns\", ObjectId( \"000000000000000000000000\" ) ) }";
+ }
+ virtual string jsonIn() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$ref\" : \"ns\" , "
+ << "\"$id\" : ObjectId( \"000000000000000000000000\" ) , "
+ << "\"otherfield\" : \"value\" } }";
+ return ss.str();
+ }
+};
+
+class Empty : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{}";
+ }
+};
+
+class EmptyWithSpace : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ }";
+ }
+};
+
+class SingleString : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", "b");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"b\" }";
+ }
+};
+
+class EmptyStrings : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"\" : \"\" }";
+ }
+};
+
+class SingleNumber : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1 }";
+ }
+};
+
+class RealNumber : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ double d;
+ ASSERT_OK(parseNumberFromString("0.7", &d));
+ b.append("a", d);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 0.7 }";
+ }
+};
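+
+// RealNumber here and FancyNumber below run their literals through
+// parseNumberFromString rather than writing C++ double constants, presumably so
+// the BSON value is bit-for-bit the same double the JSON parser produces from
+// the identical text.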
+
+class FancyNumber : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ double d;
+ ASSERT_OK(parseNumberFromString("-4.4433e-2", &d));
+ b.append("a", d);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : -4.4433e-2 }";
+ }
+};
+
+class TwoElements : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ b.append("b", "foo");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1, \"b\" : \"foo\" }";
+ }
+};
+
+class Subobject : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ BSONObjBuilder c;
+ c.append("z", b.done());
+ return c.obj();
+ }
+ virtual string json() const {
+ return "{ \"z\" : { \"a\" : 1 } }";
+ }
+};
+
+class DeeplyNestedObject : public TestRoundTrip {
+ virtual string buildJson(int depth) const {
+ if (depth == 0) {
+ return "{\"0\":true}";
+ } else {
+ std::stringstream ss;
+ ss << "{\"" << depth << "\":" << buildJson(depth - 1) << "}";
+ return ss.str();
}
- };
-
- class ScopeOut {
- public:
- void run() {
- auto_ptr<Scope> s;
- s.reset( globalScriptEngine->newScope() );
-
- s->invokeSafe( "x = 5;" , 0, 0 );
- {
- BSONObjBuilder b;
- s->append( b , "z" , "x" );
- ASSERT_EQUALS( BSON( "z" << 5 ) , b.obj() );
- }
-
- s->invokeSafe( "x = function(){ return 17; }" , 0, 0 );
- BSONObj temp;
- {
- BSONObjBuilder b;
- s->append( b , "z" , "x" );
- temp = b.obj();
- }
-
- s->invokeSafe( "foo = this.z();" , 0, &temp );
- ASSERT_EQUALS( 17 , s->getNumber( "foo" ) );
+ }
+ virtual BSONObj buildBson(int depth) const {
+ BSONObjBuilder builder;
+ if (depth == 0) {
+ builder.append("0", true);
+ return builder.obj();
+ } else {
+ std::stringstream ss;
+ ss << depth;
+ depth--;
+ builder.append(ss.str(), buildBson(depth));
+ return builder.obj();
}
- };
-
- class RenameTest {
- public:
- void run() {
- auto_ptr<Scope> s;
- s.reset( globalScriptEngine->newScope() );
-
- s->setNumber( "x" , 5 );
- ASSERT_EQUALS( 5 , s->getNumber( "x" ) );
- ASSERT_EQUALS( Undefined , s->type( "y" ) );
-
- s->rename( "x" , "y" );
- ASSERT_EQUALS( 5 , s->getNumber( "y" ) );
- ASSERT_EQUALS( Undefined , s->type( "x" ) );
-
- s->rename( "y" , "x" );
- ASSERT_EQUALS( 5 , s->getNumber( "x" ) );
- ASSERT_EQUALS( Undefined , s->type( "y" ) );
+ }
+ virtual BSONObj bson() const {
+ return buildBson(35);
+ }
+ virtual string json() const {
+ return buildJson(35);
+ }
+};
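+
+// For concreteness: buildJson(2) expands to {"2":{"1":{"0":true}}}, and
+// buildBson(2) assembles the same nesting, so bson() and json() agree at
+// depth 35 as well.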
+
+class ArrayEmpty : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ vector<int> arr;
+ BSONObjBuilder b;
+ b.append("a", arr);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [] }";
+ }
+};
+
+class Array : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ vector<int> arr;
+ arr.push_back(1);
+ arr.push_back(2);
+ arr.push_back(3);
+ BSONObjBuilder b;
+ b.append("a", arr);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [ 1, 2, 3 ] }";
+ }
+};
+
+class True : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool("a", true);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : true }";
+ }
+};
+
+class False : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool("a", false);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : false }";
+ }
+};
+
+class Null : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNull("a");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : null }";
+ }
+};
+
+class Undefined : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendUndefined("a");
+ return b.obj();
+ }
+
+ // Don't need to return anything because we are overriding both jsonOut and jsonIn
+ virtual string json() const {
+ return "";
+ }
+
+ // undefined values come out as null in the shell. See SERVER-6102.
+ virtual string jsonIn() const {
+ return "{ \"a\" : undefined }";
+ }
+ virtual string jsonOut() const {
+ return "{ \"a\" : null }";
+ }
+};
+
+class EscapedCharacters : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", "\" \\ / \b \f \n \r \t \v");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t \\v\" }";
+ }
+};
+
+class NonEscapedCharacters : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", "% { a z $ # ' ");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\% \\{ \\a \\z \\$ \\# \\' \\ \" }";
+ }
+};
+
+class AllowedControlCharacter : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a", "\x7f");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\x7f\" }";
+ }
+};
+
+class NumbersInFieldName : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("b1", "b");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ b1 : \"b\" }";
+ }
+};
+
+class EscapeFieldName : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("\n", "b");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"\\n\" : \"b\" }";
+ }
+};
+
+class EscapedUnicodeToUtf8 : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[7];
+ u[0] = 0xe0 | 0x0a;
+ u[1] = 0x80;
+ u[2] = 0x80;
+ u[3] = 0xe0 | 0x0a;
+ u[4] = 0x80;
+ u[5] = 0x80;
+ u[6] = 0;
+ b.append("a", (char*)u);
+ BSONObj built = b.obj();
+ ASSERT_EQUALS(string((char*)u), built.firstElement().valuestr());
+ return built;
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\ua000\\uA000\" }";
+ }
+};
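+
+// Why 0xe0 | 0x0a above: U+A000 encodes in UTF-8 as 1110xxxx 10xxxxxx 10xxxxxx
+// over the code point bits 1010 000000 000000, i.e. 0xEA 0x80 0x80 -- exactly
+// the three bytes assembled twice in bson().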
+
+class Utf8AllOnes : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[8];
+ u[0] = 0x01;
+
+ u[1] = 0x7f;
+
+ u[2] = 0xdf;
+ u[3] = 0xbf;
+
+ u[4] = 0xef;
+ u[5] = 0xbf;
+ u[6] = 0xbf;
+
+ u[7] = 0;
+
+ b.append("a", (char*)u);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0001\\u007f\\u07ff\\uffff\" }";
+ }
+};
+
+class Utf8FirstByteOnes : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[6];
+ u[0] = 0xdc;
+ u[1] = 0x80;
+
+ u[2] = 0xef;
+ u[3] = 0xbc;
+ u[4] = 0x80;
+
+ u[5] = 0;
+
+ b.append("a", (char*)u);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0700\\uff00\" }";
+ }
+};
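+
+// Decoding check: 0xDC 0x80 is 110 11100 / 10 000000, i.e. code point
+// 11100 000000 = U+0700; 0xEF 0xBC 0x80 is 1111 / 111100 / 000000 = U+FF00.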
+
+class BinData : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ char z[3];
+ z[0] = 'a';
+ z[1] = 'b';
+ z[2] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData("a", 3, BinDataGeneral, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : BinData( 0 , \"YWJj\" ) }";
+ }
+};
+
+class BinDataPaddedSingle : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ char z[2];
+ z[0] = 'a';
+ z[1] = 'b';
+ BSONObjBuilder b;
+ b.appendBinData("a", 2, BinDataGeneral, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : BinData( 0 , \"YWI=\" ) }";
+ }
+};
+
+class BinDataPaddedDouble : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ char z[1];
+ z[0] = 'a';
+ BSONObjBuilder b;
+ b.appendBinData("a", 1, BinDataGeneral, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : BinData( 0 , \"YQ==\" ) }";
+ }
+};
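+
+// The three expected strings above are plain base64 arithmetic: "abc" (3 bytes)
+// packs into four characters "YWJj" with no padding; "ab" leaves the last 6-bit
+// group short, giving "YWI="; "a" (0x61 -> 011000 01...) fills only two
+// characters, giving "YQ==".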
+
+class BinDataAllChars : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ unsigned char z[] = {0x00, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
+ 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
+ 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
+ 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
+ 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF};
+ BSONObjBuilder b;
+ b.appendBinData("a", 48, BinDataGeneral, z);
+ return b.obj();
+ }
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : BinData( 0 , \"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ << "abcdefghijklmnopqrstuvwxyz0123456789+/\" ) }";
+ return ss.str();
+ }
+};
+
+class Date : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", 0);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : new Date( 0 ) }";
+ }
+};
+
+class DateNonzero : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", 100);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : new Date( 100 ) }";
+ }
+};
+
+class DateNegative : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate("a", -1);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : new Date( -1 ) }";
+ }
+};
+
+class Timestamp : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendTimestamp("a", 20000ULL, 5);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : Timestamp( 20, 5 ) }";
+ }
+};
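+
+// appendTimestamp() takes the time portion in milliseconds, so 20000ULL above
+// is stored as 20 seconds -- hence the expected Timestamp( 20, 5 ).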
+
+class TimestampMax : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendMaxForType("a", mongo::Timestamp);
+ BSONObj o = b.obj();
+ return o;
+ }
+ virtual string json() const {
+ OpTime opTime = OpTime::max();
+ stringstream ss;
+ ss << "{ \"a\" : Timestamp( " << opTime.getSecs() << ", " << opTime.getInc() << " ) }";
+ return ss.str();
+ }
+};
+
+class Regex : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "b", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : /b/ }";
+ }
+};
+
+class RegexWithQuotes : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex("a", "\"", "");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : /\"/ }";
+ }
+};
+
+class UnquotedFieldName : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("a_b", 1);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ a_b : 1 }";
+ }
+};
+
+class SingleQuotes : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("ab'c\"", "bb\b '\"");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ 'ab\\'c\"' : 'bb\\b \\'\"' }";
+ }
+};
+
+class ObjectId : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ OID id;
+ id.init("deadbeeff00ddeadbeeff00d");
+ BSONObjBuilder b;
+ b.appendOID("foo", &id);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"foo\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
+ }
+};
+
+class NumberLong : public TestRoundTrip {
+public:
+ virtual BSONObj bson() const {
+ return BSON("long" << 4611686018427387904ll); // 2**62
+ }
+ virtual string json() const {
+ return "{ \"long\": NumberLong(4611686018427387904) }";
+ }
+};
+
+class NumberInt : public TestRoundTrip {
+public:
+ virtual BSONObj bson() const {
+ return BSON("int" << static_cast<int>(100));
+ }
+ virtual string json() const {
+ return "{ \"int\": NumberInt(100) }";
+ }
+};
+
+class Number : public TestRoundTrip {
+public:
+ virtual BSONObj bson() const {
+ return BSON("double" << 3.14);
+ }
+ virtual string json() const {
+ return "{ \"double\": Number(3.14) }";
+ }
+};
+
+class UUID : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char z[] = {0xAB,
+ 0xCD,
+ 0xEF,
+ 0xAB,
+ 0xCD,
+ 0xEF,
+ 0xAB,
+ 0xCD,
+ 0xEF,
+ 0xAB,
+ 0xCD,
+ 0xEF,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00};
+ b.appendBinData("a", 16, bdtUUID, z);
+ return b.obj();
+ }
+
+ // Don't need to return anything because we are overriding both jsonOut and jsonIn
+ virtual string json() const {
+ return "";
+ }
+
+ // The UUID constructor corresponds to the dedicated UUID BinData subtype (3)
+ virtual string jsonIn() const {
+ return "{ \"a\" : UUID(\"abcdefabcdefabcdefabcdef00000000\") }";
+ }
+ virtual string jsonOut() const {
+ return "{ \"a\" : BinData(3,\"q83vq83vq83vq83vAAAAAA==\") }";
+ }
+};
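+
+// The hex and base64 spellings are the same 16 bytes: 0xAB 0xCD 0xEF is
+// 10101011 11001101 11101111, which regroups into the 6-bit values 42, 60, 55,
+// 47 -- "q83v" -- repeated four times, followed by the four zero bytes as
+// "AAAAAA==".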
+
+class HexData : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char z[] = {0xAB,
+ 0xCD,
+ 0xEF,
+ 0xAB,
+ 0xCD,
+ 0xEF,
+ 0xAB,
+ 0xCD,
+ 0xEF,
+ 0xAB,
+ 0xCD,
+ 0xEF,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00};
+ b.appendBinData("a", 16, BinDataGeneral, z);
+ return b.obj();
+ }
+
+ // Don't need to return anything because we are overriding both jsonOut and jsonIn
+ virtual string json() const {
+ return "";
+ }
+
+ // The HexData constructor creates a BinData type from a hex string
+ virtual string jsonIn() const {
+ return "{ \"a\" : HexData(0,\"abcdefabcdefabcdefabcdef00000000\") }";
+ }
+ virtual string jsonOut() const {
+ return "{ \"a\" : BinData(0,\"q83vq83vq83vq83vAAAAAA==\") }";
+ }
+};
+
+class MD5 : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char z[] = {0xAB,
+ 0xCD,
+ 0xEF,
+ 0xAB,
+ 0xCD,
+ 0xEF,
+ 0xAB,
+ 0xCD,
+ 0xEF,
+ 0xAB,
+ 0xCD,
+ 0xEF,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00};
+ b.appendBinData("a", 16, MD5Type, z);
+ return b.obj();
+ }
+
+ // Don't need to return anything because we are overriding both jsonOut and jsonIn
+ virtual string json() const {
+ return "";
+ }
+
+ // The MD5 constructor creates a BinData element of MD5 subtype (5) from a hex string
+ virtual string jsonIn() const {
+ return "{ \"a\" : MD5(\"abcdefabcdefabcdefabcdef00000000\") }";
+ }
+ virtual string jsonOut() const {
+ return "{ \"a\" : BinData(5,\"q83vq83vq83vq83vAAAAAA==\") }";
+ }
+};
+
+class NullString : public TestRoundTrip {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("x", "a\0b", 4);
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"x\" : \"a\\u0000b\" }";
+ }
+};
+
+} // namespace RoundTripTests
+
+class BinDataType {
+public:
+ void pp(const char* s, BSONElement e) {
+ int len;
+ const char* data = e.binData(len);
+ cout << s << ":" << e.binDataType() << "\t" << len << endl;
+ cout << "\t";
+ for (int i = 0; i < len; i++)
+ cout << (int)(data[i]) << " ";
+ cout << endl;
+ }
+
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
+
+ const char* foo = "asdas\0asdasd";
+ const char* base64 = "YXNkYXMAYXNkYXNk";
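+ // "asdas\0asdasd" is 12 bytes including the embedded NUL; base64 of those 12
+ // bytes is exactly "YXNkYXMAYXNkYXNk" (the zero byte is visible as the 'A'
+ // closing the second group, "YXMA").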
+
+ BSONObj in;
+ {
+ BSONObjBuilder b;
+ b.append("a", 7);
+ b.appendBinData("b", 12, BinDataGeneral, foo);
+ in = b.obj();
+ s->setObject("x", in);
}
- };
-
- class InvalidStoredJS {
- public:
- void run() {
- BSONObjBuilder query;
- query.append( "_id" , "invalidstoredjs1" );
-
- BSONObjBuilder update;
- update.append( "_id" , "invalidstoredjs1" );
- update.appendCode( "value" , "function () { db.test.find().forEach(function(obj) { continue; }); }" );
-
- OperationContextImpl txn;
- DBDirectClient client(&txn);
- client.update( "test.system.js" , query.obj() , update.obj() , true /* upsert */ );
-
- scoped_ptr<Scope> s( globalScriptEngine->newScope() );
- client.eval( "test" , "invalidstoredjs1()" );
-
- BSONObj info;
- BSONElement ret;
- ASSERT( client.eval( "test" , "return 5 + 12" , info , ret ) );
- ASSERT_EQUALS( 17 , ret.number() );
+ s->invokeSafe("myb = x.b; print( myb ); printjson( myb );", 0, 0);
+ s->invokeSafe("y = { c : myb };", 0, 0);
+
+ BSONObj out = s->getObject("y");
+ ASSERT_EQUALS(BinData, out["c"].type());
+ // pp( "in " , in["b"] );
+ // pp( "out" , out["c"] );
+ ASSERT_EQUALS(0, in["b"].woCompare(out["c"], false));
+
+ // check that BinData js class is utilized
+ s->invokeSafe("q = x.b.toString();", 0, 0);
+ stringstream expected;
+ expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")";
+ ASSERT_EQUALS(expected.str(), s->getString("q"));
+
+ stringstream scriptBuilder;
+ scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64
+ << "\" ) };";
+ string script = scriptBuilder.str();
+ s->invokeSafe(script.c_str(), 0, 0);
+ out = s->getObject("z");
+ // pp( "out" , out["c"] );
+ ASSERT_EQUALS(0, in["b"].woCompare(out["c"], false));
+
+ s->invokeSafe("a = { f: new BinData( 128, \"\" ) };", 0, 0);
+ out = s->getObject("a");
+ int len = -1;
+ out["f"].binData(len);
+ ASSERT_EQUALS(0, len);
+ ASSERT_EQUALS(128, out["f"].binDataType());
+
+ delete s;
+ }
+};
+
+class VarTests {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
+
+ ASSERT(s->exec("a = 5;", "a", false, true, false));
+ ASSERT_EQUALS(5, s->getNumber("a"));
+
+ ASSERT(s->exec("var b = 6;", "b", false, true, false));
+ ASSERT_EQUALS(6, s->getNumber("b"));
+ delete s;
+ }
+};
+
+class Speed1 {
+public:
+ void run() {
+ BSONObj start = BSON("x" << 5.0);
+ BSONObj empty;
+
+ auto_ptr<Scope> s;
+ s.reset(globalScriptEngine->newScope());
+
+ ScriptingFunction f = s->createFunction("return this.x + 6;");
+
+ Timer t;
+ double n = 0;
+ for (; n < 10000; n++) {
+ s->invoke(f, &empty, &start);
+ ASSERT_EQUALS(11, s->getNumber("__returnValue"));
}
- };
- class NoReturnSpecified {
- public:
- void run() {
- Scope * s = globalScriptEngine->newScope();
-
- s->invoke("x=5;" , 0, 0 );
- ASSERT_EQUALS(5, s->getNumber("__returnValue"));
-
- s->invoke("x='test'", 0, 0);
- ASSERT_EQUALS("test", s->getString("__returnValue"));
-
- s->invoke("x='return'", 0, 0);
- ASSERT_EQUALS("return", s->getString("__returnValue"));
-
- s->invoke("return 'return'", 0, 0);
- ASSERT_EQUALS("return", s->getString("__returnValue"));
-
- s->invoke("x = ' return '", 0, 0);
- ASSERT_EQUALS(" return ", s->getString("__returnValue"));
-
- s->invoke("x = \" return \"", 0, 0);
- ASSERT_EQUALS(" return ", s->getString("__returnValue"));
-
- s->invoke("x = \"' return '\"", 0, 0);
- ASSERT_EQUALS("' return '", s->getString("__returnValue"));
-
- s->invoke("x = '\" return \"'", 0, 0);
- ASSERT_EQUALS("\" return \"", s->getString("__returnValue"));
-
- s->invoke(";return 5", 0, 0);
- ASSERT_EQUALS(5, s->getNumber("__returnValue"));
-
- s->invoke("String('return')", 0, 0);
- ASSERT_EQUALS("return", s->getString("__returnValue"));
-
- s->invoke("String(' return ')", 0, 0);
- ASSERT_EQUALS(" return ", s->getString("__returnValue"));
-
- // This should fail so we set the expected __returnValue to undefined
- s->invoke(";x = 5", 0, 0);
- ASSERT_EQUALS("undefined", s->getString("__returnValue"));
-
- s->invoke("String(\"'return\")", 0, 0);
- ASSERT_EQUALS("'return", s->getString("__returnValue"));
-
- s->invoke("String('\"return')", 0, 0);
- ASSERT_EQUALS("\"return", s->getString("__returnValue"));
-
- // A fail case
- s->invoke("return$ = 0", 0, 0);
- // Checks to confirm that the result is NaN
- ASSERT(s->getNumber("__returnValue") != s->getNumber("__returnValue"));
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "js" ) {
- // Initialize the Javascript interpreter
- ScriptEngine::setup();
+ // cout << "speed1: " << ( n / t.millis() ) << " ops/ms" << endl;
+ }
+};
+
+class ScopeOut {
+public:
+ void run() {
+ auto_ptr<Scope> s;
+ s.reset(globalScriptEngine->newScope());
+
+ s->invokeSafe("x = 5;", 0, 0);
+ {
+ BSONObjBuilder b;
+ s->append(b, "z", "x");
+ ASSERT_EQUALS(BSON("z" << 5), b.obj());
}
- void setupTests() {
- add< BuiltinTests >();
- add< BasicScope >();
- add< ResetScope >();
- add< FalseTests >();
- add< SimpleFunctions >();
- add< ExecLogError >();
- add< InvokeLogError >();
- add< ExecTimeout >();
- add< ExecNoTimeout >();
- add< InvokeTimeout >();
- add< InvokeNoTimeout >();
-
- add< ObjectMapping >();
- add< ObjectDecoding >();
- add< JSOIDTests >();
- add< SetImplicit >();
- add< ObjectModReadonlyTests >();
- add< OtherJSTypes >();
- add< SpecialDBTypes >();
- add< TypeConservation >();
- add< NumberLong >();
- add< NumberLong2 >();
- add< InvalidTimestamp >();
- add< RenameTest >();
-
- add< WeirdObjects >();
- add< CodeTests >();
- add< BinDataType >();
-
- add< VarTests >();
-
- add< Speed1 >();
-
- add< InvalidUTF8Check >();
- add< Utf8Check >();
- add< LongUtf8String >();
-
- add< ScopeOut >();
- add< InvalidStoredJS >();
-
- add< NoReturnSpecified >();
-
- add< RoundTripTests::DBRefTest >();
- add< RoundTripTests::DBPointerTest >();
- add< RoundTripTests::InformalDBRefTest >();
- add< RoundTripTests::InformalDBRefOIDTest >();
- add< RoundTripTests::InformalDBRefExtraFieldTest >();
- add< RoundTripTests::Empty >();
- add< RoundTripTests::EmptyWithSpace >();
- add< RoundTripTests::SingleString >();
- add< RoundTripTests::EmptyStrings >();
- add< RoundTripTests::SingleNumber >();
- add< RoundTripTests::RealNumber >();
- add< RoundTripTests::FancyNumber >();
- add< RoundTripTests::TwoElements >();
- add< RoundTripTests::Subobject >();
- add< RoundTripTests::DeeplyNestedObject >();
- add< RoundTripTests::ArrayEmpty >();
- add< RoundTripTests::Array >();
- add< RoundTripTests::True >();
- add< RoundTripTests::False >();
- add< RoundTripTests::Null >();
- add< RoundTripTests::Undefined >();
- add< RoundTripTests::EscapedCharacters >();
- add< RoundTripTests::NonEscapedCharacters >();
- add< RoundTripTests::AllowedControlCharacter >();
- add< RoundTripTests::NumbersInFieldName >();
- add< RoundTripTests::EscapeFieldName >();
- add< RoundTripTests::EscapedUnicodeToUtf8 >();
- add< RoundTripTests::Utf8AllOnes >();
- add< RoundTripTests::Utf8FirstByteOnes >();
- add< RoundTripTests::BinData >();
- add< RoundTripTests::BinDataPaddedSingle >();
- add< RoundTripTests::BinDataPaddedDouble >();
- add< RoundTripTests::BinDataAllChars >();
- add< RoundTripTests::Date >();
- add< RoundTripTests::DateNonzero >();
- add< RoundTripTests::DateNegative >();
- add< RoundTripTests::Timestamp >();
- add< RoundTripTests::TimestampMax >();
- add< RoundTripTests::Regex >();
- add< RoundTripTests::RegexWithQuotes >();
- add< RoundTripTests::UnquotedFieldName >();
- add< RoundTripTests::SingleQuotes >();
- add< RoundTripTests::ObjectId >();
- add< RoundTripTests::NumberLong >();
- add< RoundTripTests::NumberInt >();
- add< RoundTripTests::Number >();
- add< RoundTripTests::UUID >();
- add< RoundTripTests::HexData >();
- add< RoundTripTests::MD5 >();
- add< RoundTripTests::NullString >();
+ s->invokeSafe("x = function(){ return 17; }", 0, 0);
+ BSONObj temp;
+ {
+ BSONObjBuilder b;
+ s->append(b, "z", "x");
+ temp = b.obj();
}
- };
-
- SuiteInstance<All> myall;
-} // namespace JavaJSTests
+ s->invokeSafe("foo = this.z();", 0, &temp);
+ ASSERT_EQUALS(17, s->getNumber("foo"));
+ }
+};
+
+class RenameTest {
+public:
+ void run() {
+ auto_ptr<Scope> s;
+ s.reset(globalScriptEngine->newScope());
+
+ s->setNumber("x", 5);
+ ASSERT_EQUALS(5, s->getNumber("x"));
+ ASSERT_EQUALS(Undefined, s->type("y"));
+
+ s->rename("x", "y");
+ ASSERT_EQUALS(5, s->getNumber("y"));
+ ASSERT_EQUALS(Undefined, s->type("x"));
+
+ s->rename("y", "x");
+ ASSERT_EQUALS(5, s->getNumber("x"));
+ ASSERT_EQUALS(Undefined, s->type("y"));
+ }
+};
+
+
+class InvalidStoredJS {
+public:
+ void run() {
+ BSONObjBuilder query;
+ query.append("_id", "invalidstoredjs1");
+
+ BSONObjBuilder update;
+ update.append("_id", "invalidstoredjs1");
+ update.appendCode("value",
+ "function () { db.test.find().forEach(function(obj) { continue; }); }");
+
+ OperationContextImpl txn;
+ DBDirectClient client(&txn);
+ client.update("test.system.js", query.obj(), update.obj(), true /* upsert */);
+
+ scoped_ptr<Scope> s(globalScriptEngine->newScope());
+ client.eval("test", "invalidstoredjs1()");
+
+ BSONObj info;
+ BSONElement ret;
+ ASSERT(client.eval("test", "return 5 + 12", info, ret));
+ ASSERT_EQUALS(17, ret.number());
+ }
+};
+
+class NoReturnSpecified {
+public:
+ void run() {
+ Scope* s = globalScriptEngine->newScope();
+
+ s->invoke("x=5;", 0, 0);
+ ASSERT_EQUALS(5, s->getNumber("__returnValue"));
+
+ s->invoke("x='test'", 0, 0);
+ ASSERT_EQUALS("test", s->getString("__returnValue"));
+
+ s->invoke("x='return'", 0, 0);
+ ASSERT_EQUALS("return", s->getString("__returnValue"));
+
+ s->invoke("return 'return'", 0, 0);
+ ASSERT_EQUALS("return", s->getString("__returnValue"));
+
+ s->invoke("x = ' return '", 0, 0);
+ ASSERT_EQUALS(" return ", s->getString("__returnValue"));
+
+ s->invoke("x = \" return \"", 0, 0);
+ ASSERT_EQUALS(" return ", s->getString("__returnValue"));
+
+ s->invoke("x = \"' return '\"", 0, 0);
+ ASSERT_EQUALS("' return '", s->getString("__returnValue"));
+
+ s->invoke("x = '\" return \"'", 0, 0);
+ ASSERT_EQUALS("\" return \"", s->getString("__returnValue"));
+
+ s->invoke(";return 5", 0, 0);
+ ASSERT_EQUALS(5, s->getNumber("__returnValue"));
+
+ s->invoke("String('return')", 0, 0);
+ ASSERT_EQUALS("return", s->getString("__returnValue"));
+
+ s->invoke("String(' return ')", 0, 0);
+ ASSERT_EQUALS(" return ", s->getString("__returnValue"));
+
+ // This should fail so we set the expected __returnValue to undefined
+ s->invoke(";x = 5", 0, 0);
+ ASSERT_EQUALS("undefined", s->getString("__returnValue"));
+
+ s->invoke("String(\"'return\")", 0, 0);
+ ASSERT_EQUALS("'return", s->getString("__returnValue"));
+
+ s->invoke("String('\"return')", 0, 0);
+ ASSERT_EQUALS("\"return", s->getString("__returnValue"));
+
+ // A fail case
+ s->invoke("return$ = 0", 0, 0);
+ // NaN is the only value that compares unequal to itself, so this confirms the result is NaN
+ ASSERT(s->getNumber("__returnValue") != s->getNumber("__returnValue"));
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("js") {
+ // Initialize the JavaScript interpreter
+ ScriptEngine::setup();
+ }
+
+ void setupTests() {
+ add<BuiltinTests>();
+ add<BasicScope>();
+ add<ResetScope>();
+ add<FalseTests>();
+ add<SimpleFunctions>();
+ add<ExecLogError>();
+ add<InvokeLogError>();
+ add<ExecTimeout>();
+ add<ExecNoTimeout>();
+ add<InvokeTimeout>();
+ add<InvokeNoTimeout>();
+
+ add<ObjectMapping>();
+ add<ObjectDecoding>();
+ add<JSOIDTests>();
+ add<SetImplicit>();
+ add<ObjectModReadonlyTests>();
+ add<OtherJSTypes>();
+ add<SpecialDBTypes>();
+ add<TypeConservation>();
+ add<NumberLong>();
+ add<NumberLong2>();
+ add<InvalidTimestamp>();
+ add<RenameTest>();
+
+ add<WeirdObjects>();
+ add<CodeTests>();
+ add<BinDataType>();
+
+ add<VarTests>();
+
+ add<Speed1>();
+
+ add<InvalidUTF8Check>();
+ add<Utf8Check>();
+ add<LongUtf8String>();
+
+ add<ScopeOut>();
+ add<InvalidStoredJS>();
+
+ add<NoReturnSpecified>();
+
+ add<RoundTripTests::DBRefTest>();
+ add<RoundTripTests::DBPointerTest>();
+ add<RoundTripTests::InformalDBRefTest>();
+ add<RoundTripTests::InformalDBRefOIDTest>();
+ add<RoundTripTests::InformalDBRefExtraFieldTest>();
+ add<RoundTripTests::Empty>();
+ add<RoundTripTests::EmptyWithSpace>();
+ add<RoundTripTests::SingleString>();
+ add<RoundTripTests::EmptyStrings>();
+ add<RoundTripTests::SingleNumber>();
+ add<RoundTripTests::RealNumber>();
+ add<RoundTripTests::FancyNumber>();
+ add<RoundTripTests::TwoElements>();
+ add<RoundTripTests::Subobject>();
+ add<RoundTripTests::DeeplyNestedObject>();
+ add<RoundTripTests::ArrayEmpty>();
+ add<RoundTripTests::Array>();
+ add<RoundTripTests::True>();
+ add<RoundTripTests::False>();
+ add<RoundTripTests::Null>();
+ add<RoundTripTests::Undefined>();
+ add<RoundTripTests::EscapedCharacters>();
+ add<RoundTripTests::NonEscapedCharacters>();
+ add<RoundTripTests::AllowedControlCharacter>();
+ add<RoundTripTests::NumbersInFieldName>();
+ add<RoundTripTests::EscapeFieldName>();
+ add<RoundTripTests::EscapedUnicodeToUtf8>();
+ add<RoundTripTests::Utf8AllOnes>();
+ add<RoundTripTests::Utf8FirstByteOnes>();
+ add<RoundTripTests::BinData>();
+ add<RoundTripTests::BinDataPaddedSingle>();
+ add<RoundTripTests::BinDataPaddedDouble>();
+ add<RoundTripTests::BinDataAllChars>();
+ add<RoundTripTests::Date>();
+ add<RoundTripTests::DateNonzero>();
+ add<RoundTripTests::DateNegative>();
+ add<RoundTripTests::Timestamp>();
+ add<RoundTripTests::TimestampMax>();
+ add<RoundTripTests::Regex>();
+ add<RoundTripTests::RegexWithQuotes>();
+ add<RoundTripTests::UnquotedFieldName>();
+ add<RoundTripTests::SingleQuotes>();
+ add<RoundTripTests::ObjectId>();
+ add<RoundTripTests::NumberLong>();
+ add<RoundTripTests::NumberInt>();
+ add<RoundTripTests::Number>();
+ add<RoundTripTests::UUID>();
+ add<RoundTripTests::HexData>();
+ add<RoundTripTests::MD5>();
+ add<RoundTripTests::NullString>();
+ }
+};
+
+SuiteInstance<All> myall;
+
+} // namespace JavaJSTests
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
index 8d66896eb85..ffa5824fdc5 100644
--- a/src/mongo/dbtests/matchertests.cpp
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -39,246 +39,241 @@
namespace MatcherTests {
- using std::cout;
- using std::endl;
- using std::string;
-
- class CollectionBase {
- public:
- CollectionBase() { }
-
- virtual ~CollectionBase() { }
- };
-
- template <typename M>
- class Basic {
- public:
- void run() {
- BSONObj query = fromjson( "{\"a\":\"b\"}" );
- M m(query, MatchExpressionParser::WhereCallback());
- ASSERT( m.matches( fromjson( "{\"a\":\"b\"}" ) ) );
- }
- };
-
- template <typename M>
- class DoubleEqual {
- public:
- void run() {
- BSONObj query = fromjson( "{\"a\":5}" );
- M m(query, MatchExpressionParser::WhereCallback());
- ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
- }
- };
-
- template <typename M>
- class MixedNumericEqual {
- public:
- void run() {
- BSONObjBuilder query;
- query.append( "a", 5 );
- M m(query.done(), MatchExpressionParser::WhereCallback());
- ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
- }
- };
-
- template <typename M>
- class MixedNumericGt {
- public:
- void run() {
- BSONObj query = fromjson( "{\"a\":{\"$gt\":4}}" );
- M m(query, MatchExpressionParser::WhereCallback());
+using std::cout;
+using std::endl;
+using std::string;
+
+class CollectionBase {
+public:
+ CollectionBase() {}
+
+ virtual ~CollectionBase() {}
+};
+
+template <typename M>
+class Basic {
+public:
+ void run() {
+ BSONObj query = fromjson("{\"a\":\"b\"}");
+ M m(query, MatchExpressionParser::WhereCallback());
+ ASSERT(m.matches(fromjson("{\"a\":\"b\"}")));
+ }
+};
+
+template <typename M>
+class DoubleEqual {
+public:
+ void run() {
+ BSONObj query = fromjson("{\"a\":5}");
+ M m(query, MatchExpressionParser::WhereCallback());
+ ASSERT(m.matches(fromjson("{\"a\":5}")));
+ }
+};
+
+template <typename M>
+class MixedNumericEqual {
+public:
+ void run() {
+ BSONObjBuilder query;
+ query.append("a", 5);
+ M m(query.done(), MatchExpressionParser::WhereCallback());
+ ASSERT(m.matches(fromjson("{\"a\":5}")));
+ }
+};
+
+template <typename M>
+class MixedNumericGt {
+public:
+ void run() {
+ BSONObj query = fromjson("{\"a\":{\"$gt\":4}}");
+ M m(query, MatchExpressionParser::WhereCallback());
+ BSONObjBuilder b;
+ b.append("a", 5);
+ ASSERT(m.matches(b.done()));
+ }
+};
+
+template <typename M>
+class MixedNumericIN {
+public:
+ void run() {
+ BSONObj query = fromjson("{ a : { $in : [4,6] } }");
+ ASSERT_EQUALS(4, query["a"].embeddedObject()["$in"].embeddedObject()["0"].number());
+ ASSERT_EQUALS(NumberInt, query["a"].embeddedObject()["$in"].embeddedObject()["0"].type());
+
+ M m(query, MatchExpressionParser::WhereCallback());
+
+ {
BSONObjBuilder b;
- b.append( "a", 5 );
- ASSERT( m.matches( b.done() ) );
+ b.append("a", 4.0);
+ ASSERT(m.matches(b.done()));
}
- };
-
- template <typename M>
- class MixedNumericIN {
- public:
- void run() {
- BSONObj query = fromjson( "{ a : { $in : [4,6] } }" );
- ASSERT_EQUALS( 4 , query["a"].embeddedObject()["$in"].embeddedObject()["0"].number() );
- ASSERT_EQUALS( NumberInt , query["a"].embeddedObject()["$in"].embeddedObject()["0"].type() );
-
- M m(query, MatchExpressionParser::WhereCallback());
-
- {
- BSONObjBuilder b;
- b.append( "a" , 4.0 );
- ASSERT( m.matches( b.done() ) );
- }
- {
- BSONObjBuilder b;
- b.append( "a" , 5 );
- ASSERT( ! m.matches( b.done() ) );
- }
-
-
- {
- BSONObjBuilder b;
- b.append( "a" , 4 );
- ASSERT( m.matches( b.done() ) );
- }
-
- }
- };
-
- template <typename M>
- class MixedNumericEmbedded {
- public:
- void run() {
- M m(BSON("a" << BSON("x" << 1)), MatchExpressionParser::WhereCallback());
- ASSERT( m.matches( BSON( "a" << BSON( "x" << 1 ) ) ) );
- ASSERT( m.matches( BSON( "a" << BSON( "x" << 1.0 ) ) ) );
- }
- };
-
- template <typename M>
- class Size {
- public:
- void run() {
- M m(fromjson("{a:{$size:4}}"), MatchExpressionParser::WhereCallback());
- ASSERT( m.matches( fromjson( "{a:[1,2,3,4]}" ) ) );
- ASSERT( !m.matches( fromjson( "{a:[1,2,3]}" ) ) );
- ASSERT( !m.matches( fromjson( "{a:[1,2,3,'a','b']}" ) ) );
- ASSERT( !m.matches( fromjson( "{a:[[1,2,3,4]]}" ) ) );
- }
- };
-
- template <typename M>
- class WithinBox {
- public:
- void run() {
- M m(fromjson("{loc:{$within:{$box:[{x: 4, y:4},[6,6]]}}}"),
- MatchExpressionParser::WhereCallback());
- ASSERT(!m.matches(fromjson("{loc: [3,4]}")));
- ASSERT(m.matches(fromjson("{loc: [4,4]}")));
- ASSERT(m.matches(fromjson("{loc: [5,5]}")));
- ASSERT(m.matches(fromjson("{loc: [5,5.1]}")));
- ASSERT(m.matches(fromjson("{loc: {x: 5, y:5.1}}")));
- }
- };
-
- template <typename M>
- class WithinPolygon {
- public:
- void run() {
- M m(fromjson("{loc:{$within:{$polygon:[{x:0,y:0},[0,5],[5,5],[5,0]]}}}"),
- MatchExpressionParser::WhereCallback());
- ASSERT(m.matches(fromjson("{loc: [3,4]}")));
- ASSERT(m.matches(fromjson("{loc: [4,4]}")));
- ASSERT(m.matches(fromjson("{loc: {x:5,y:5}}")));
- ASSERT(!m.matches(fromjson("{loc: [5,5.1]}")));
- ASSERT(!m.matches(fromjson("{loc: {}}")));
- }
- };
-
- template <typename M>
- class WithinCenter {
- public:
- void run() {
- M m(fromjson("{loc:{$within:{$center:[{x:30,y:30},10]}}}"),
- MatchExpressionParser::WhereCallback());
- ASSERT(!m.matches(fromjson("{loc: [3,4]}")));
- ASSERT(m.matches(fromjson("{loc: {x:30,y:30}}")));
- ASSERT(m.matches(fromjson("{loc: [20,30]}")));
- ASSERT(m.matches(fromjson("{loc: [30,20]}")));
- ASSERT(m.matches(fromjson("{loc: [40,30]}")));
- ASSERT(m.matches(fromjson("{loc: [30,40]}")));
- ASSERT(!m.matches(fromjson("{loc: [31,40]}")));
- }
- };
-
- /** Test that MatchDetails::elemMatchKey() is set correctly after a match. */
- template <typename M>
- class ElemMatchKey {
- public:
- void run() {
- M matcher(BSON("a.b" << 1),
- MatchExpressionParser::WhereCallback());
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( matcher.matches( fromjson( "{ a:[ { b:1 } ] }" ), &details ) );
- // The '0' entry of the 'a' array is matched.
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( string( "0" ), details.elemMatchKey() );
- }
- };
-
- template <typename M>
- class WhereSimple1 {
- public:
- void run() {
- OperationContextImpl txn;
- AutoGetCollectionForRead ctx(&txn, "unittests.matchertests");
-
- M m(BSON("$where" << "function(){ return this.a == 1; }"),
- WhereCallbackReal(&txn, StringData("unittests")));
- ASSERT( m.matches( BSON( "a" << 1 ) ) );
- ASSERT( !m.matches( BSON( "a" << 2 ) ) );
- }
- };
-
- template< typename M >
- class TimingBase {
- public:
- long dotime( const BSONObj& patt , const BSONObj& obj ) {
- M m(patt, MatchExpressionParser::WhereCallback());
- Timer t;
- for ( int i=0; i<900000; i++ ) {
- if ( !m.matches( obj ) ) {
- ASSERT( 0 );
- }
- }
- return t.millis();
- }
- };
-
- template< typename M >
- class AllTiming : public TimingBase<M> {
- public:
- void run() {
- long normal = TimingBase<M>::dotime( BSON( "x" << 5 ),
- BSON( "x" << 5 ) );
-
- long all = TimingBase<M>::dotime( BSON( "x" << BSON( "$all" << BSON_ARRAY( 5 ) ) ),
- BSON( "x" << 5 ) );
-
- cout << "AllTiming " << demangleName(typeid(M))
- << " normal: " << normal << " all: " << all << endl;
+ {
+ BSONObjBuilder b;
+ b.append("a", 5);
+ ASSERT(!m.matches(b.done()));
}
- };
- class All : public Suite {
- public:
- All() : Suite( "matcher" ) {
+ {
+ BSONObjBuilder b;
+ b.append("a", 4);
+ ASSERT(m.matches(b.done()));
}
-
-#define ADD_BOTH(TEST) \
- add< TEST<Matcher> >();
-
- void setupTests() {
- ADD_BOTH(Basic);
- ADD_BOTH(DoubleEqual);
- ADD_BOTH(MixedNumericEqual);
- ADD_BOTH(MixedNumericGt);
- ADD_BOTH(MixedNumericIN);
- ADD_BOTH(Size);
- ADD_BOTH(MixedNumericEmbedded);
- ADD_BOTH(ElemMatchKey);
- ADD_BOTH(WhereSimple1);
- ADD_BOTH(AllTiming);
- ADD_BOTH(WithinBox);
- ADD_BOTH(WithinCenter);
- ADD_BOTH(WithinPolygon);
+ }
+};
+
+template <typename M>
+class MixedNumericEmbedded {
+public:
+ void run() {
+ M m(BSON("a" << BSON("x" << 1)), MatchExpressionParser::WhereCallback());
+ ASSERT(m.matches(BSON("a" << BSON("x" << 1))));
+ ASSERT(m.matches(BSON("a" << BSON("x" << 1.0))));
+ }
+};
+
+template <typename M>
+class Size {
+public:
+ void run() {
+ M m(fromjson("{a:{$size:4}}"), MatchExpressionParser::WhereCallback());
+ ASSERT(m.matches(fromjson("{a:[1,2,3,4]}")));
+ ASSERT(!m.matches(fromjson("{a:[1,2,3]}")));
+ ASSERT(!m.matches(fromjson("{a:[1,2,3,'a','b']}")));
+ ASSERT(!m.matches(fromjson("{a:[[1,2,3,4]]}")));
+ }
+};
+
+template <typename M>
+class WithinBox {
+public:
+ void run() {
+ M m(fromjson("{loc:{$within:{$box:[{x: 4, y:4},[6,6]]}}}"),
+ MatchExpressionParser::WhereCallback());
+ ASSERT(!m.matches(fromjson("{loc: [3,4]}")));
+ ASSERT(m.matches(fromjson("{loc: [4,4]}")));
+ ASSERT(m.matches(fromjson("{loc: [5,5]}")));
+ ASSERT(m.matches(fromjson("{loc: [5,5.1]}")));
+ ASSERT(m.matches(fromjson("{loc: {x: 5, y:5.1}}")));
+ }
+};
+
+template <typename M>
+class WithinPolygon {
+public:
+ void run() {
+ M m(fromjson("{loc:{$within:{$polygon:[{x:0,y:0},[0,5],[5,5],[5,0]]}}}"),
+ MatchExpressionParser::WhereCallback());
+ ASSERT(m.matches(fromjson("{loc: [3,4]}")));
+ ASSERT(m.matches(fromjson("{loc: [4,4]}")));
+ ASSERT(m.matches(fromjson("{loc: {x:5,y:5}}")));
+ ASSERT(!m.matches(fromjson("{loc: [5,5.1]}")));
+ ASSERT(!m.matches(fromjson("{loc: {}}")));
+ }
+};
+
+template <typename M>
+class WithinCenter {
+public:
+ void run() {
+ M m(fromjson("{loc:{$within:{$center:[{x:30,y:30},10]}}}"),
+ MatchExpressionParser::WhereCallback());
+ ASSERT(!m.matches(fromjson("{loc: [3,4]}")));
+ ASSERT(m.matches(fromjson("{loc: {x:30,y:30}}")));
+ ASSERT(m.matches(fromjson("{loc: [20,30]}")));
+ ASSERT(m.matches(fromjson("{loc: [30,20]}")));
+ ASSERT(m.matches(fromjson("{loc: [40,30]}")));
+ ASSERT(m.matches(fromjson("{loc: [30,40]}")));
+ ASSERT(!m.matches(fromjson("{loc: [31,40]}")));
+ }
+};
+
+/** Test that MatchDetails::elemMatchKey() is set correctly after a match. */
+template <typename M>
+class ElemMatchKey {
+public:
+ void run() {
+ M matcher(BSON("a.b" << 1), MatchExpressionParser::WhereCallback());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(matcher.matches(fromjson("{ a:[ { b:1 } ] }"), &details));
+ // The '0' entry of the 'a' array is matched.
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS(string("0"), details.elemMatchKey());
+ }
+};
+
+template <typename M>
+class WhereSimple1 {
+public:
+ void run() {
+ OperationContextImpl txn;
+ AutoGetCollectionForRead ctx(&txn, "unittests.matchertests");
+
+ M m(BSON("$where"
+ << "function(){ return this.a == 1; }"),
+ WhereCallbackReal(&txn, StringData("unittests")));
+ ASSERT(m.matches(BSON("a" << 1)));
+ ASSERT(!m.matches(BSON("a" << 2)));
+ }
+};
+
+template <typename M>
+class TimingBase {
+public:
+ long dotime(const BSONObj& patt, const BSONObj& obj) {
+ M m(patt, MatchExpressionParser::WhereCallback());
+ Timer t;
+ for (int i = 0; i < 900000; i++) {
+ if (!m.matches(obj)) {
+ ASSERT(0);
+ }
}
- };
-
- SuiteInstance<All> dball;
-
-} // namespace MatcherTests
-
+ return t.millis();
+ }
+};
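+
+// AllTiming below drives this harness twice -- once with a plain equality
+// predicate and once with the equivalent { $all : [ 5 ] } form -- and prints
+// both 900k-iteration timings so the relative cost of $all can be compared.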
+
+template <typename M>
+class AllTiming : public TimingBase<M> {
+public:
+ void run() {
+ long normal = TimingBase<M>::dotime(BSON("x" << 5), BSON("x" << 5));
+
+ long all =
+ TimingBase<M>::dotime(BSON("x" << BSON("$all" << BSON_ARRAY(5))), BSON("x" << 5));
+
+ cout << "AllTiming " << demangleName(typeid(M)) << " normal: " << normal << " all: " << all
+ << endl;
+ }
+};
+
+
+class All : public Suite {
+public:
+ All() : Suite("matcher") {}
+
+#define ADD_BOTH(TEST) add<TEST<Matcher>>();
+
+ void setupTests() {
+ ADD_BOTH(Basic);
+ ADD_BOTH(DoubleEqual);
+ ADD_BOTH(MixedNumericEqual);
+ ADD_BOTH(MixedNumericGt);
+ ADD_BOTH(MixedNumericIN);
+ ADD_BOTH(Size);
+ ADD_BOTH(MixedNumericEmbedded);
+ ADD_BOTH(ElemMatchKey);
+ ADD_BOTH(WhereSimple1);
+ ADD_BOTH(AllTiming);
+ ADD_BOTH(WithinBox);
+ ADD_BOTH(WithinCenter);
+ ADD_BOTH(WithinPolygon);
+ }
+};
+
+SuiteInstance<All> dball;
+
+} // namespace MatcherTests
diff --git a/src/mongo/dbtests/merge_chunk_tests.cpp b/src/mongo/dbtests/merge_chunk_tests.cpp
index 47e233483cd..8cb57e9d911 100644
--- a/src/mongo/dbtests/merge_chunk_tests.cpp
+++ b/src/mongo/dbtests/merge_chunk_tests.cpp
@@ -27,7 +27,7 @@
*/
#include "mongo/dbtests/config_server_fixture.h"
-#include "mongo/s/chunk.h" // for genID
+#include "mongo/s/chunk.h" // for genID
#include "mongo/s/chunk_version.h"
#include "mongo/s/collection_metadata.h"
#include "mongo/s/d_state.h"
@@ -39,332 +39,311 @@
namespace mongo {
- using std::string;
- using std::vector;
+using std::string;
+using std::vector;
+
+/**
+ * Specialization of the config server fixture with helpers for the tests below.
+ */
+class MergeChunkFixture : public ConfigServerFixture {
+public:
+ MergeChunkFixture() : ConfigServerFixture() {}
/**
- * Specialization of the config server fixture with helpers for the tests below.
+ * Stores ranges for a particular collection and shard starting from some version
*/
- class MergeChunkFixture: public ConfigServerFixture {
- public:
-
- MergeChunkFixture() : ConfigServerFixture() {
-
- }
-
- /**
- * Stores ranges for a particular collection and shard starting from some version
- */
- void storeCollectionRanges( const NamespaceString& nss,
- const string& shardName,
- const vector<KeyRange>& ranges,
- const ChunkVersion& startVersion ) {
-
- // Get key pattern from first range
- ASSERT_GREATER_THAN( ranges.size(), 0u );
-
- CollectionType coll;
- coll.setNS( nss.ns() );
- coll.setKeyPattern( ranges.begin()->keyPattern );
- coll.setEpoch( startVersion.epoch() );
- coll.setUpdatedAt( 1ULL );
- string errMsg;
- ASSERT( coll.isValid( &errMsg ) );
-
- DBDirectClient client(&_txn);
-
- client.update( CollectionType::ConfigNS,
- BSON( CollectionType::ns( coll.getNS() ) ),
- coll.toBSON(), true, false );
-
- ChunkVersion nextVersion = startVersion;
- for ( vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it ) {
-
- ChunkType chunk;
- // TODO: We should not rely on the serialized ns, minkey being unique in the future,
- // causes problems since it links string serialization to correctness.
- chunk.setName( Chunk::genID( nss, it->minKey ) );
- chunk.setShard( shardName );
- chunk.setNS( nss.ns() );
- chunk.setVersion( nextVersion );
- chunk.setMin( it->minKey );
- chunk.setMax( it->maxKey );
- nextVersion.incMajor();
-
- client.insert( ChunkType::ConfigNS, chunk.toBSON() );
- }
+ void storeCollectionRanges(const NamespaceString& nss,
+ const string& shardName,
+ const vector<KeyRange>& ranges,
+ const ChunkVersion& startVersion) {
+ // Get key pattern from first range
+ ASSERT_GREATER_THAN(ranges.size(), 0u);
+
+ CollectionType coll;
+ coll.setNS(nss.ns());
+ coll.setKeyPattern(ranges.begin()->keyPattern);
+ coll.setEpoch(startVersion.epoch());
+ coll.setUpdatedAt(1ULL);
+ string errMsg;
+ ASSERT(coll.isValid(&errMsg));
+
+ DBDirectClient client(&_txn);
+
+ client.update(CollectionType::ConfigNS,
+ BSON(CollectionType::ns(coll.getNS())),
+ coll.toBSON(),
+ true,
+ false);
+
+ ChunkVersion nextVersion = startVersion;
+ for (vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it) {
+ ChunkType chunk;
+ // TODO: In the future we should not rely on the serialized (ns, minkey) pair
+ // being unique; relying on it causes problems since it links string
+ // serialization to correctness.
+ chunk.setName(Chunk::genID(nss, it->minKey));
+ chunk.setShard(shardName);
+ chunk.setNS(nss.ns());
+ chunk.setVersion(nextVersion);
+ chunk.setMin(it->minKey);
+ chunk.setMax(it->maxKey);
+ nextVersion.incMajor();
+
+ client.insert(ChunkType::ConfigNS, chunk.toBSON());
}
+ }
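+
+ // Each range stored above receives its own increasing major version: with a
+ // startVersion of ChunkVersion(1, 0, epoch), the two ranges used by the tests
+ // below are written as versions 1|0 and 2|0.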
- /**
- * Makes sure that all the ranges here no longer exist on disk but the merged range does
- */
- void assertWrittenAsMerged( const vector<KeyRange>& ranges ) {
-
- dumpServer();
-
- BSONObj rangeMin;
- BSONObj rangeMax;
-
- DBDirectClient client(&_txn);
+ /**
+ * Makes sure that all the ranges here no longer exist on disk but the merged range does
+ */
+ void assertWrittenAsMerged(const vector<KeyRange>& ranges) {
+ dumpServer();
- // Ensure written
- for( vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it ) {
+ BSONObj rangeMin;
+ BSONObj rangeMax;
- Query query( BSON( ChunkType::min( it->minKey ) <<
- ChunkType::max( it->maxKey ) <<
- ChunkType::shard( shardName() ) ) );
- ASSERT(client.findOne(ChunkType::ConfigNS, query).isEmpty());
+ DBDirectClient client(&_txn);
- if ( rangeMin.isEmpty() || rangeMin.woCompare( it->minKey ) > 0 ) {
- rangeMin = it->minKey;
- }
+ // Ensure written
+ for (vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it) {
+ Query query(BSON(ChunkType::min(it->minKey) << ChunkType::max(it->maxKey)
+ << ChunkType::shard(shardName())));
+ ASSERT(client.findOne(ChunkType::ConfigNS, query).isEmpty());
- if ( rangeMax.isEmpty() || rangeMax.woCompare( it->maxKey ) < 0 ) {
- rangeMax = it->maxKey;
- }
+ if (rangeMin.isEmpty() || rangeMin.woCompare(it->minKey) > 0) {
+ rangeMin = it->minKey;
}
- Query query( BSON( ChunkType::min( rangeMin ) <<
- ChunkType::max( rangeMax ) <<
- ChunkType::shard( shardName() ) ) );
- ASSERT(!client.findOne(ChunkType::ConfigNS, query).isEmpty());
- }
-
- string shardName() { return "shard0000"; }
-
- protected:
-
- virtual void setUp() {
- ConfigServerFixture::setUp();
- shardingState.initialize( configSvr().toString() );
- shardingState.gotShardName( shardName() );
- }
-
- virtual void tearDown() {
- shardingState.resetShardingState();
- ConfigServerFixture::tearDown();
+ if (rangeMax.isEmpty() || rangeMax.woCompare(it->maxKey) < 0) {
+ rangeMax = it->maxKey;
+ }
}
- };
-
- //
- // Tests for upgrading the config server between versions.
- //
- // In general these tests do pretty minimal validation of the config server data itself, but
- // do ensure that the upgrade mechanism is working correctly w.r.t the config.version
- // collection.
- //
-
- // Rename the fixture so that our tests have a useful name in the executable
- typedef MergeChunkFixture MergeChunkTests;
-
- TEST_F(MergeChunkTests, FailedMerge) {
-
- const NamespaceString nss( "foo.bar" );
- const BSONObj kp = BSON( "x" << 1 );
- const OID epoch = OID::gen();
- vector<KeyRange> ranges;
-
- // Setup chunk metadata
- ranges.push_back( KeyRange( nss, BSON( "x" << 0 ), BSON( "x" << 10 ), kp ) );
- ranges.push_back( KeyRange( nss, BSON( "x" << 10 ), BSON( "x" << 20 ), kp ) );
- storeCollectionRanges( nss, shardName(), ranges, ChunkVersion( 1, 0, epoch ) );
-
- // Do bad merges
- string errMsg;
- bool result;
-
- result = mergeChunks(&_txn, nss, BSON( "x" << 5 ), BSON( "x" << 20 ), epoch, &errMsg );
- ASSERT_NOT_EQUALS( errMsg, "" );
- ASSERT( !result );
-
- result = mergeChunks(&_txn, nss, BSON( "x" << 0 ), BSON( "x" << 15 ), epoch, &errMsg );
- ASSERT_NOT_EQUALS( errMsg, "" );
- ASSERT( !result );
-
- result = mergeChunks(&_txn, nss, BSON( "x" << -10 ), BSON( "x" << 20 ), epoch, &errMsg );
- ASSERT_NOT_EQUALS( errMsg, "" );
- ASSERT( !result );
-
- result = mergeChunks(&_txn, nss, BSON( "x" << 0 ), BSON( "x" << 30 ), epoch, &errMsg );
- ASSERT_NOT_EQUALS( errMsg, "" );
- ASSERT( !result );
-
- result = mergeChunks(&_txn, nss, BSON( "x" << 0 ), BSON( "x" << 10 ), epoch, &errMsg );
- ASSERT_NOT_EQUALS( errMsg, "" );
- ASSERT( !result );
-
- // Wrong epoch
- result = mergeChunks(&_txn, nss, BSON( "x" << 0 ),
- BSON( "x" << 10 ), OID::gen(), &errMsg );
- ASSERT_NOT_EQUALS( errMsg, "" );
- ASSERT( !result );
- }
-
- TEST_F(MergeChunkTests, FailedMergeHole) {
- const NamespaceString nss( "foo.bar" );
- const BSONObj kp = BSON( "x" << 1 );
- const OID epoch = OID::gen();
- vector<KeyRange> ranges;
-
- // Setup chunk metadata
- ranges.push_back( KeyRange( nss, BSON( "x" << 0 ), BSON( "x" << 10 ), kp ) );
- ranges.push_back( KeyRange( nss, BSON( "x" << 11 ), BSON( "x" << 20 ), kp ) );
- storeCollectionRanges( nss, shardName(), ranges, ChunkVersion( 1, 0, epoch ) );
-
- // Do bad merge with hole
- string errMsg;
- bool result;
- result = mergeChunks(&_txn, nss, BSON( "x" << 0 ), BSON( "x" << 20 ), epoch, &errMsg );
- ASSERT_NOT_EQUALS( errMsg, "" );
- ASSERT( !result );
+ Query query(BSON(ChunkType::min(rangeMin) << ChunkType::max(rangeMax)
+ << ChunkType::shard(shardName())));
+ ASSERT(!client.findOne(ChunkType::ConfigNS, query).isEmpty());
}
- TEST_F(MergeChunkTests, FailedMergeMinMax) {
-
- const NamespaceString nss( "foo.bar" );
- const BSONObj kp = BSON( "x" << 1 );
- const OID epoch = OID::gen();
- vector<KeyRange> ranges;
-
- // Setup chunk metadata
- ranges.push_back( KeyRange( nss, BSON( "x" << MINKEY ), BSON( "x" << 0 ), kp ) );
- ranges.push_back( KeyRange( nss, BSON( "x" << 0 ), BSON( "x" << MAXKEY ), kp ) );
- storeCollectionRanges( nss, shardName(), ranges, ChunkVersion( 1, 0, epoch ) );
-
- // Do bad merge with hole
- string errMsg;
- bool result;
- result = mergeChunks(&_txn, nss, BSON( "x" << -1 ),
- BSON( "x" << MAXKEY ), epoch, &errMsg );
- ASSERT_NOT_EQUALS( errMsg, "" );
- ASSERT( !result );
-
- result = mergeChunks(&_txn, nss, BSON( "x" << MINKEY ),
- BSON( "x" << 1 ), epoch, &errMsg );
- ASSERT_NOT_EQUALS( errMsg, "" );
- ASSERT( !result );
+ string shardName() {
+ return "shard0000";
}
- TEST_F(MergeChunkTests, BasicMerge) {
-
- const NamespaceString nss( "foo.bar" );
- const BSONObj kp = BSON( "x" << 1 );
- const OID epoch = OID::gen();
- vector<KeyRange> ranges;
-
- // Setup chunk metadata
- ranges.push_back( KeyRange( nss, BSON( "x" << 0 ), BSON( "x" << 1 ), kp ) );
- ranges.push_back( KeyRange( nss, BSON( "x" << 1 ), BSON( "x" << 2 ), kp ) );
- storeCollectionRanges( nss, shardName(), ranges, ChunkVersion( 1, 0, epoch ) );
-
- // Get latest version
- ChunkVersion latestVersion;
- shardingState.refreshMetadataNow(&_txn, nss, &latestVersion );
- shardingState.resetMetadata( nss );
-
- // Do merge
- string errMsg;
- bool result = mergeChunks(&_txn, nss, BSON( "x" << 0 ), BSON( "x" << 2 ), epoch, &errMsg );
- ASSERT_EQUALS( errMsg, "" );
- ASSERT( result );
-
- // Verify result
- CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss );
-
- ChunkType chunk;
- ASSERT( metadata->getNextChunk( BSON( "x" << 0 ), &chunk ) );
- ASSERT( chunk.getMin().woCompare( BSON( "x" << 0 ) ) == 0 );
- ASSERT( chunk.getMax().woCompare( BSON( "x" << 2 ) ) == 0 );
- ASSERT_EQUALS( metadata->getNumChunks(), 1u );
-
- ASSERT_EQUALS( metadata->getShardVersion().majorVersion(), latestVersion.majorVersion() );
- ASSERT_GREATER_THAN( metadata->getShardVersion().minorVersion(),
- latestVersion.minorVersion() );
-
- assertWrittenAsMerged( ranges );
+protected:
+ virtual void setUp() {
+ ConfigServerFixture::setUp();
+ shardingState.initialize(configSvr().toString());
+ shardingState.gotShardName(shardName());
}
- TEST_F(MergeChunkTests, BasicMergeMinMax ) {
-
- const NamespaceString nss( "foo.bar" );
- const BSONObj kp = BSON( "x" << 1 );
- const OID epoch = OID::gen();
- vector<KeyRange> ranges;
-
- // Setup chunk metadata
- ranges.push_back( KeyRange( nss, BSON( "x" << MINKEY ), BSON( "x" << 0 ), kp ) );
- ranges.push_back( KeyRange( nss, BSON( "x" << 0 ), BSON( "x" << MAXKEY ), kp ) );
- storeCollectionRanges( nss, shardName(), ranges, ChunkVersion( 1, 0, epoch ) );
-
- // Get latest version
- ChunkVersion latestVersion;
- shardingState.refreshMetadataNow(&_txn, nss, &latestVersion);
- shardingState.resetMetadata( nss );
-
- // Do merge
- string errMsg;
- bool result = mergeChunks(&_txn, nss, BSON( "x" << MINKEY ),
- BSON( "x" << MAXKEY ), epoch, &errMsg );
- ASSERT_EQUALS( errMsg, "" );
- ASSERT( result );
-
- // Verify result
- CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss );
-
- ChunkType chunk;
- ASSERT( metadata->getNextChunk( BSON( "x" << MINKEY ), &chunk ) );
- ASSERT( chunk.getMin().woCompare( BSON( "x" << MINKEY ) ) == 0 );
- ASSERT( chunk.getMax().woCompare( BSON( "x" << MAXKEY ) ) == 0 );
- ASSERT_EQUALS( metadata->getNumChunks(), 1u );
-
- ASSERT_EQUALS( metadata->getShardVersion().majorVersion(), latestVersion.majorVersion() );
- ASSERT_GREATER_THAN( metadata->getShardVersion().minorVersion(),
- latestVersion.minorVersion() );
-
- assertWrittenAsMerged( ranges );
- }
-
- TEST_F(MergeChunkTests, CompoundMerge ) {
-
- const NamespaceString nss( "foo.bar" );
- const BSONObj kp = BSON( "x" << 1 << "y" << 1 );
- const OID epoch = OID::gen();
- vector<KeyRange> ranges;
-
- // Setup chunk metadata
- ranges.push_back( KeyRange( nss, BSON( "x" << 0 << "y" << 1 ),
- BSON( "x" << 1 << "y" << 0 ), kp ) );
- ranges.push_back( KeyRange( nss, BSON( "x" << 1 << "y" << 0 ),
- BSON( "x" << 2 << "y" << 1 ), kp ) );
- storeCollectionRanges( nss, shardName(), ranges, ChunkVersion( 1, 0, epoch ) );
-
- // Get latest version
- ChunkVersion latestVersion;
- shardingState.refreshMetadataNow(&_txn, nss, &latestVersion);
- shardingState.resetMetadata( nss );
-
- // Do merge
- string errMsg;
- bool result = mergeChunks(&_txn, nss, BSON( "x" << 0 << "y" << 1 ),
- BSON( "x" << 2 << "y" << 1 ), epoch, &errMsg );
- ASSERT_EQUALS( errMsg, "" );
- ASSERT( result );
-
- // Verify result
- CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss );
-
- ChunkType chunk;
- ASSERT( metadata->getNextChunk( BSON( "x" << 0 << "y" << 1 ), &chunk ) );
- ASSERT( chunk.getMin().woCompare( BSON( "x" << 0 << "y" << 1 ) ) == 0 );
- ASSERT( chunk.getMax().woCompare( BSON( "x" << 2 << "y" << 1 ) ) == 0 );
- ASSERT_EQUALS( metadata->getNumChunks(), 1u );
-
- ASSERT_EQUALS( metadata->getShardVersion().majorVersion(), latestVersion.majorVersion() );
- ASSERT_GREATER_THAN( metadata->getShardVersion().minorVersion(),
- latestVersion.minorVersion() );
-
- assertWrittenAsMerged( ranges );
+ virtual void tearDown() {
+ shardingState.resetShardingState();
+ ConfigServerFixture::tearDown();
}
-
-} // end namespace
+};
+
+//
+// Tests for merging contiguous chunks on a single shard via mergeChunks.
+//
+// These tests cover the failure modes (misaligned bounds, holes between chunks, and a
+// stale epoch) as well as successful merges, verifying both the shard's in-memory
+// metadata and the chunk documents written back to the config server.
+//
+
+// Rename the fixture so that our tests have a useful name in the executable
+typedef MergeChunkFixture MergeChunkTests;
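The failed merges below all violate one invariant: the shard's chunks must exactly tile the requested range, with the given bounds landing on existing chunk boundaries and no holes or overlaps in between. As a minimal stand-alone sketch of that invariant (not MongoDB's implementation; `Range`, `coversContiguously`, and the integer keys are hypothetical stand-ins for KeyRange and BSON key comparison):

#include <vector>

struct Range {
    int min;
    int max;  // exclusive upper bound, as with chunk ranges
};

// True if sortedChunks exactly tiles [rangeMin, rangeMax): the first chunk
// starts at rangeMin, each chunk begins where the previous one ended, and the
// last chunk ends at rangeMax. Holes, overlaps, or misaligned bounds fail.
bool coversContiguously(const std::vector<Range>& sortedChunks, int rangeMin, int rangeMax) {
    int cursor = rangeMin;
    for (std::vector<Range>::const_iterator it = sortedChunks.begin(); it != sortedChunks.end();
         ++it) {
        if (it->min != cursor)
            return false;  // hole (it->min > cursor) or overlap (it->min < cursor)
        cursor = it->max;
    }
    return !sortedChunks.empty() && cursor == rangeMax;
}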
+
+TEST_F(MergeChunkTests, FailedMerge) {
+ const NamespaceString nss("foo.bar");
+ const BSONObj kp = BSON("x" << 1);
+ const OID epoch = OID::gen();
+ vector<KeyRange> ranges;
+
+ // Setup chunk metadata
+ ranges.push_back(KeyRange(nss, BSON("x" << 0), BSON("x" << 10), kp));
+ ranges.push_back(KeyRange(nss, BSON("x" << 10), BSON("x" << 20), kp));
+ storeCollectionRanges(nss, shardName(), ranges, ChunkVersion(1, 0, epoch));
+
+ // Do bad merges
+ string errMsg;
+ bool result;
+
+ result = mergeChunks(&_txn, nss, BSON("x" << 5), BSON("x" << 20), epoch, &errMsg);
+ ASSERT_NOT_EQUALS(errMsg, "");
+ ASSERT(!result);
+
+ result = mergeChunks(&_txn, nss, BSON("x" << 0), BSON("x" << 15), epoch, &errMsg);
+ ASSERT_NOT_EQUALS(errMsg, "");
+ ASSERT(!result);
+
+ result = mergeChunks(&_txn, nss, BSON("x" << -10), BSON("x" << 20), epoch, &errMsg);
+ ASSERT_NOT_EQUALS(errMsg, "");
+ ASSERT(!result);
+
+ result = mergeChunks(&_txn, nss, BSON("x" << 0), BSON("x" << 30), epoch, &errMsg);
+ ASSERT_NOT_EQUALS(errMsg, "");
+ ASSERT(!result);
+
+ result = mergeChunks(&_txn, nss, BSON("x" << 0), BSON("x" << 10), epoch, &errMsg);
+ ASSERT_NOT_EQUALS(errMsg, "");
+ ASSERT(!result);
+
+ // Wrong epoch
+ result = mergeChunks(&_txn, nss, BSON("x" << 0), BSON("x" << 10), OID::gen(), &errMsg);
+ ASSERT_NOT_EQUALS(errMsg, "");
+ ASSERT(!result);
+}
+
+TEST_F(MergeChunkTests, FailedMergeHole) {
+ const NamespaceString nss("foo.bar");
+ const BSONObj kp = BSON("x" << 1);
+ const OID epoch = OID::gen();
+ vector<KeyRange> ranges;
+
+ // Setup chunk metadata
+ ranges.push_back(KeyRange(nss, BSON("x" << 0), BSON("x" << 10), kp));
+ ranges.push_back(KeyRange(nss, BSON("x" << 11), BSON("x" << 20), kp));
+ storeCollectionRanges(nss, shardName(), ranges, ChunkVersion(1, 0, epoch));
+
+ // Do bad merge with hole
+ string errMsg;
+ bool result;
+ result = mergeChunks(&_txn, nss, BSON("x" << 0), BSON("x" << 20), epoch, &errMsg);
+ ASSERT_NOT_EQUALS(errMsg, "");
+ ASSERT(!result);
+}
+
+TEST_F(MergeChunkTests, FailedMergeMinMax) {
+ const NamespaceString nss("foo.bar");
+ const BSONObj kp = BSON("x" << 1);
+ const OID epoch = OID::gen();
+ vector<KeyRange> ranges;
+
+ // Setup chunk metadata
+ ranges.push_back(KeyRange(nss, BSON("x" << MINKEY), BSON("x" << 0), kp));
+ ranges.push_back(KeyRange(nss, BSON("x" << 0), BSON("x" << MAXKEY), kp));
+ storeCollectionRanges(nss, shardName(), ranges, ChunkVersion(1, 0, epoch));
+
+ // Do bad merge with hole
+ string errMsg;
+ bool result;
+ result = mergeChunks(&_txn, nss, BSON("x" << -1), BSON("x" << MAXKEY), epoch, &errMsg);
+ ASSERT_NOT_EQUALS(errMsg, "");
+ ASSERT(!result);
+
+ result = mergeChunks(&_txn, nss, BSON("x" << MINKEY), BSON("x" << 1), epoch, &errMsg);
+ ASSERT_NOT_EQUALS(errMsg, "");
+ ASSERT(!result);
+}
+
+TEST_F(MergeChunkTests, BasicMerge) {
+ const NamespaceString nss("foo.bar");
+ const BSONObj kp = BSON("x" << 1);
+ const OID epoch = OID::gen();
+ vector<KeyRange> ranges;
+
+ // Setup chunk metadata
+ ranges.push_back(KeyRange(nss, BSON("x" << 0), BSON("x" << 1), kp));
+ ranges.push_back(KeyRange(nss, BSON("x" << 1), BSON("x" << 2), kp));
+ storeCollectionRanges(nss, shardName(), ranges, ChunkVersion(1, 0, epoch));
+
+ // Get latest version
+ ChunkVersion latestVersion;
+ shardingState.refreshMetadataNow(&_txn, nss, &latestVersion);
+ shardingState.resetMetadata(nss);
+
+ // Do merge
+ string errMsg;
+ bool result = mergeChunks(&_txn, nss, BSON("x" << 0), BSON("x" << 2), epoch, &errMsg);
+ ASSERT_EQUALS(errMsg, "");
+ ASSERT(result);
+
+ // Verify result
+ CollectionMetadataPtr metadata = shardingState.getCollectionMetadata(nss);
+
+ ChunkType chunk;
+ ASSERT(metadata->getNextChunk(BSON("x" << 0), &chunk));
+ ASSERT(chunk.getMin().woCompare(BSON("x" << 0)) == 0);
+ ASSERT(chunk.getMax().woCompare(BSON("x" << 2)) == 0);
+ ASSERT_EQUALS(metadata->getNumChunks(), 1u);
+
+ ASSERT_EQUALS(metadata->getShardVersion().majorVersion(), latestVersion.majorVersion());
+ ASSERT_GREATER_THAN(metadata->getShardVersion().minorVersion(), latestVersion.minorVersion());
+
+ assertWrittenAsMerged(ranges);
+}
+
+TEST_F(MergeChunkTests, BasicMergeMinMax) {
+ const NamespaceString nss("foo.bar");
+ const BSONObj kp = BSON("x" << 1);
+ const OID epoch = OID::gen();
+ vector<KeyRange> ranges;
+
+ // Setup chunk metadata
+ ranges.push_back(KeyRange(nss, BSON("x" << MINKEY), BSON("x" << 0), kp));
+ ranges.push_back(KeyRange(nss, BSON("x" << 0), BSON("x" << MAXKEY), kp));
+ storeCollectionRanges(nss, shardName(), ranges, ChunkVersion(1, 0, epoch));
+
+ // Get latest version
+ ChunkVersion latestVersion;
+ shardingState.refreshMetadataNow(&_txn, nss, &latestVersion);
+ shardingState.resetMetadata(nss);
+
+ // Do merge
+ string errMsg;
+ bool result = mergeChunks(&_txn, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY), epoch, &errMsg);
+ ASSERT_EQUALS(errMsg, "");
+ ASSERT(result);
+
+ // Verify result
+ CollectionMetadataPtr metadata = shardingState.getCollectionMetadata(nss);
+
+ ChunkType chunk;
+ ASSERT(metadata->getNextChunk(BSON("x" << MINKEY), &chunk));
+ ASSERT(chunk.getMin().woCompare(BSON("x" << MINKEY)) == 0);
+ ASSERT(chunk.getMax().woCompare(BSON("x" << MAXKEY)) == 0);
+ ASSERT_EQUALS(metadata->getNumChunks(), 1u);
+
+ ASSERT_EQUALS(metadata->getShardVersion().majorVersion(), latestVersion.majorVersion());
+ ASSERT_GREATER_THAN(metadata->getShardVersion().minorVersion(), latestVersion.minorVersion());
+
+ assertWrittenAsMerged(ranges);
+}
+
+TEST_F(MergeChunkTests, CompoundMerge) {
+ const NamespaceString nss("foo.bar");
+ const BSONObj kp = BSON("x" << 1 << "y" << 1);
+ const OID epoch = OID::gen();
+ vector<KeyRange> ranges;
+
+ // Setup chunk metadata
+ ranges.push_back(KeyRange(nss, BSON("x" << 0 << "y" << 1), BSON("x" << 1 << "y" << 0), kp));
+ ranges.push_back(KeyRange(nss, BSON("x" << 1 << "y" << 0), BSON("x" << 2 << "y" << 1), kp));
+ storeCollectionRanges(nss, shardName(), ranges, ChunkVersion(1, 0, epoch));
+
+ // Get latest version
+ ChunkVersion latestVersion;
+ shardingState.refreshMetadataNow(&_txn, nss, &latestVersion);
+ shardingState.resetMetadata(nss);
+
+ // Do merge
+ string errMsg;
+ bool result = mergeChunks(
+ &_txn, nss, BSON("x" << 0 << "y" << 1), BSON("x" << 2 << "y" << 1), epoch, &errMsg);
+ ASSERT_EQUALS(errMsg, "");
+ ASSERT(result);
+
+ // Verify result
+ CollectionMetadataPtr metadata = shardingState.getCollectionMetadata(nss);
+
+ ChunkType chunk;
+ ASSERT(metadata->getNextChunk(BSON("x" << 0 << "y" << 1), &chunk));
+ ASSERT(chunk.getMin().woCompare(BSON("x" << 0 << "y" << 1)) == 0);
+ ASSERT(chunk.getMax().woCompare(BSON("x" << 2 << "y" << 1)) == 0);
+ ASSERT_EQUALS(metadata->getNumChunks(), 1u);
+
+ ASSERT_EQUALS(metadata->getShardVersion().majorVersion(), latestVersion.majorVersion());
+ ASSERT_GREATER_THAN(metadata->getShardVersion().minorVersion(), latestVersion.minorVersion());
+
+ assertWrittenAsMerged(ranges);
+}
+
+} // end namespace
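Taken together, the tests above pin down the mergeChunks contract. A sketch of a hypothetical caller relying on it (the signature comes from the calls above; the wrapper function and the merged bounds are illustrative):

void mergeOrWarn(OperationContext* txn, const NamespaceString& nss, const OID& epoch) {
    std::string errMsg;
    bool ok = mergeChunks(txn, nss, BSON("x" << 0), BSON("x" << 2), epoch, &errMsg);
    if (!ok) {
        // Failure path: mergeChunks returns false and fills errMsg (the tests
        // assert it is non-empty).
        mongo::unittest::log() << "chunk merge failed: " << errMsg << std::endl;
        return;
    }
    // Success path: errMsg stays empty, the merged chunk replaces the originals
    // in both the in-memory metadata and the config chunks collection, and the
    // shard version receives a minor (never major) bump.
}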
diff --git a/src/mongo/dbtests/mmaptests.cpp b/src/mongo/dbtests/mmaptests.cpp
index 6f2cad63043..43912b0125e 100644
--- a/src/mongo/dbtests/mmaptests.cpp
+++ b/src/mongo/dbtests/mmaptests.cpp
@@ -47,148 +47,148 @@
namespace MMapTests {
- using std::endl;
- using std::string;
-
- class LeakTest {
- const string fn;
- const int optOld;
- public:
- LeakTest() :
- fn((boost::filesystem::path(storageGlobalParams.dbpath) / "testfile.map").string()),
- optOld(mmapv1GlobalOptions.journalOptions)
- {
- mmapv1GlobalOptions.journalOptions = 0; // DurParanoid doesn't make sense with this test
+using std::endl;
+using std::string;
+
+class LeakTest {
+ const string fn;
+ const int optOld;
+
+public:
+ LeakTest()
+ : fn((boost::filesystem::path(storageGlobalParams.dbpath) / "testfile.map").string()),
+ optOld(mmapv1GlobalOptions.journalOptions) {
+ mmapv1GlobalOptions.journalOptions = 0; // DurParanoid doesn't make sense with this test
+ }
+ ~LeakTest() {
+ mmapv1GlobalOptions.journalOptions = optOld;
+ try {
+ boost::filesystem::remove(fn);
+ } catch (...) {
}
- ~LeakTest() {
- mmapv1GlobalOptions.journalOptions = optOld;
- try { boost::filesystem::remove(fn); }
- catch(...) { }
+ }
+ void run() {
+ try {
+ boost::filesystem::remove(fn);
+ } catch (...) {
}
- void run() {
-
- try { boost::filesystem::remove(fn); }
- catch(...) { }
- MMAPV1LockerImpl lockState;
- Lock::GlobalWrite lk(&lockState);
+ MMAPV1LockerImpl lockState;
+ Lock::GlobalWrite lk(&lockState);
+ {
+ DurableMappedFile f;
+ unsigned long long len = 256 * 1024 * 1024;
+ verify(f.create(fn, len, /*sequential*/ false));
{
- DurableMappedFile f;
- unsigned long long len = 256 * 1024 * 1024;
- verify( f.create(fn, len, /*sequential*/false) );
- {
- char *p = (char *) f.getView();
- verify(p);
- // write something to the private view as a test
- if (storageGlobalParams.dur)
- privateViews.makeWritable(p, 6);
- strcpy(p, "hello");
- }
- if (storageGlobalParams.dur) {
- char *w = (char *) f.view_write();
- strcpy(w + 6, "world");
- }
- MongoFileFinder ff;
- ASSERT( ff.findByPath(fn) );
- ASSERT( ff.findByPath("asdf") == 0 );
+ char* p = (char*)f.getView();
+ verify(p);
+ // write something to the private view as a test
+ if (storageGlobalParams.dur)
+ privateViews.makeWritable(p, 6);
+ strcpy(p, "hello");
}
- {
- MongoFileFinder ff;
- ASSERT( ff.findByPath(fn) == 0 );
+ if (storageGlobalParams.dur) {
+ char* w = (char*)f.view_write();
+ strcpy(w + 6, "world");
}
+ MongoFileFinder ff;
+ ASSERT(ff.findByPath(fn));
+ ASSERT(ff.findByPath("asdf") == 0);
+ }
+ {
+ MongoFileFinder ff;
+ ASSERT(ff.findByPath(fn) == 0);
+ }
- int N = 10000;
+ int N = 10000;
#if !defined(_WIN32) && !defined(__linux__)
- // seems this test is slow on OS X.
- N = 100;
+ // seems this test is slow on OS X.
+ N = 100;
#endif
- // we make a lot here -- if we were leaking, presumably it would fail doing this many.
- Timer t;
- for( int i = 0; i < N; i++ ) {
- DurableMappedFile f;
- verify( f.open(fn, i%4==1) );
- {
- char *p = (char *) f.getView();
- verify(p);
- if (storageGlobalParams.dur)
- privateViews.makeWritable(p, 4);
- strcpy(p, "zzz");
- }
- if (storageGlobalParams.dur) {
- char *w = (char *) f.view_write();
- if( i % 2 == 0 )
- ++(*w);
- verify( w[6] == 'w' );
- }
+ // we make a lot here -- if we were leaking, presumably it would fail doing this many.
+ Timer t;
+ for (int i = 0; i < N; i++) {
+ DurableMappedFile f;
+ verify(f.open(fn, i % 4 == 1));
+ {
+ char* p = (char*)f.getView();
+ verify(p);
+ if (storageGlobalParams.dur)
+ privateViews.makeWritable(p, 4);
+ strcpy(p, "zzz");
}
- if( t.millis() > 10000 ) {
- mongo::unittest::log() << "warning: MMap LeakTest is unusually slow N:" << N <<
- ' ' << t.millis() << "ms" << endl;
+ if (storageGlobalParams.dur) {
+ char* w = (char*)f.view_write();
+ if (i % 2 == 0)
+ ++(*w);
+ verify(w[6] == 'w');
}
-
}
- };
-
- class ExtentSizing {
- public:
- void run() {
- MmapV1ExtentManager em( "x", "x", false );
+ if (t.millis() > 10000) {
+ mongo::unittest::log() << "warning: MMap LeakTest is unusually slow N:" << N << ' '
+ << t.millis() << "ms" << endl;
+ }
+ }
+};
- ASSERT_EQUALS( em.maxSize(), em.quantizeExtentSize( em.maxSize() ) );
+class ExtentSizing {
+public:
+ void run() {
+ MmapV1ExtentManager em("x", "x", false);
- // test that no matter what we start with, we always get to max extent size
- for ( int obj=16; obj<BSONObjMaxUserSize; obj += 111 ) {
+ ASSERT_EQUALS(em.maxSize(), em.quantizeExtentSize(em.maxSize()));
- int sz = em.initialSize( obj );
+ // test that no matter what we start with, we always get to max extent size
+ for (int obj = 16; obj < BSONObjMaxUserSize; obj += 111) {
+ int sz = em.initialSize(obj);
- double totalExtentSize = sz;
+ double totalExtentSize = sz;
- int numFiles = 1;
- int sizeLeftInExtent = em.maxSize() - 1;
+ int numFiles = 1;
+ int sizeLeftInExtent = em.maxSize() - 1;
- for ( int i=0; i<100; i++ ) {
- sz = em.followupSize( obj , sz );
- ASSERT( sz >= obj );
- ASSERT( sz >= em.minSize() );
- ASSERT( sz <= em.maxSize() );
- ASSERT( sz <= em.maxSize() );
+ for (int i = 0; i < 100; i++) {
+ sz = em.followupSize(obj, sz);
+ ASSERT(sz >= obj);
+ ASSERT(sz >= em.minSize());
+ ASSERT(sz <= em.maxSize());
+ ASSERT(sz <= em.maxSize());
- totalExtentSize += sz;
+ totalExtentSize += sz;
- if ( sz < sizeLeftInExtent ) {
- sizeLeftInExtent -= sz;
- }
- else {
- numFiles++;
- sizeLeftInExtent = em.maxSize() - sz;
- }
+ if (sz < sizeLeftInExtent) {
+ sizeLeftInExtent -= sz;
+ } else {
+ numFiles++;
+ sizeLeftInExtent = em.maxSize() - sz;
}
- ASSERT_EQUALS( em.maxSize(), sz );
+ }
+ ASSERT_EQUALS(em.maxSize(), sz);
- double allocatedOnDisk = (double)numFiles * em.maxSize();
+ double allocatedOnDisk = (double)numFiles * em.maxSize();
- ASSERT( ( totalExtentSize / allocatedOnDisk ) > .95 );
+ ASSERT((totalExtentSize / allocatedOnDisk) > .95);
- invariant( em.numFiles() == 0 );
- }
+ invariant(em.numFiles() == 0);
}
- };
+ }
+};
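The loop above checks one property of extent sizing: followup sizes stay within [minSize, maxSize], never drop below the object size, and eventually saturate at maxSize. A toy growth policy with the same shape (the constants and the doubling factor are assumptions, not MmapV1ExtentManager's actual quantization rules):

#include <algorithm>

const int kMinExtent = 4096;               // assumed floor
const int kMaxExtent = 512 * 1024 * 1024;  // assumed cap

int toyInitialSize(int objSize) {
    return std::min(std::max(kMinExtent, objSize * 16), kMaxExtent);
}

int toyFollowupSize(int objSize, int lastExtentSize) {
    // Double the previous extent, clamped so the result always fits the object,
    // respects the floor, and saturates at the cap -- hence repeated calls
    // always reach kMaxExtent, which is what ExtentSizing asserts.
    int next = std::max(lastExtentSize * 2, std::max(objSize, kMinExtent));
    return std::min(next, kMaxExtent);
}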
- class All : public Suite {
- public:
- All() : Suite( "mmap" ) {}
- void setupTests() {
- if (!getGlobalEnvironment()->getGlobalStorageEngine()->isMmapV1())
- return;
+class All : public Suite {
+public:
+ All() : Suite("mmap") {}
+ void setupTests() {
+ if (!getGlobalEnvironment()->getGlobalStorageEngine()->isMmapV1())
+ return;
- add< LeakTest >();
- add< ExtentSizing >();
- }
- };
+ add<LeakTest>();
+ add<ExtentSizing>();
+ }
+};
- SuiteInstance<All> myall;
+SuiteInstance<All> myall;
#if 0
@@ -296,5 +296,4 @@ namespace MMapTests {
} myall;
#endif
-
}
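Stripped of the durability machinery, LeakTest boils down to mapping and unmapping the same file thousands of times; a leaked mapping would eventually exhaust the address space and make the run fail. A stand-alone POSIX sketch of the same pattern (raw mmap/munmap instead of DurableMappedFile; the path and iteration count are arbitrary):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstring>

int main() {
    const char* path = "/tmp/leaktest.map";
    const size_t len = 4096;

    int fd = open(path, O_RDWR | O_CREAT, 0644);
    if (fd < 0 || ftruncate(fd, static_cast<off_t>(len)) != 0)
        return 1;

    for (int i = 0; i < 10000; i++) {
        void* p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
            return 1;             // with a leak, this is where failures would start
        memcpy(p, "zzz", 4);      // touch the mapping, as the real test does
        if (munmap(p, len) != 0)  // forgetting this is the leak being hunted
            return 1;
    }

    close(fd);
    unlink(path);
    return 0;
}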
diff --git a/src/mongo/dbtests/mock/mock_conn_registry.cpp b/src/mongo/dbtests/mock/mock_conn_registry.cpp
index c50e177667f..a972ee15839 100644
--- a/src/mongo/dbtests/mock/mock_conn_registry.cpp
+++ b/src/mongo/dbtests/mock/mock_conn_registry.cpp
@@ -33,80 +33,74 @@
namespace mongo {
- using std::string;
+using std::string;
- boost::scoped_ptr<MockConnRegistry> MockConnRegistry::_instance;
+boost::scoped_ptr<MockConnRegistry> MockConnRegistry::_instance;
- MONGO_INITIALIZER(MockConnRegistry)(InitializerContext* context) {
- return MockConnRegistry::init();
- }
+MONGO_INITIALIZER(MockConnRegistry)(InitializerContext* context) {
+ return MockConnRegistry::init();
+}
- Status MockConnRegistry::init() {
- MockConnRegistry::_instance.reset(new MockConnRegistry());
- return Status::OK();
- }
+Status MockConnRegistry::init() {
+ MockConnRegistry::_instance.reset(new MockConnRegistry());
+ return Status::OK();
+}
- MockConnRegistry::MockConnRegistry():
- _mockConnStrHook(this),
- _registryMutex("mockConnRegistryMutex") {
- }
+MockConnRegistry::MockConnRegistry()
+ : _mockConnStrHook(this), _registryMutex("mockConnRegistryMutex") {}
- MockConnRegistry* MockConnRegistry::get() {
- return _instance.get();
- }
-
- ConnectionString::ConnectionHook* MockConnRegistry::getConnStrHook() {
- return &_mockConnStrHook;
- }
+MockConnRegistry* MockConnRegistry::get() {
+ return _instance.get();
+}
- void MockConnRegistry::addServer(MockRemoteDBServer* server) {
- scoped_lock sl(_registryMutex);
+ConnectionString::ConnectionHook* MockConnRegistry::getConnStrHook() {
+ return &_mockConnStrHook;
+}
- const std::string hostName(server->getServerAddress());
- fassert(16533, _registry.count(hostName) == 0);
+void MockConnRegistry::addServer(MockRemoteDBServer* server) {
+ scoped_lock sl(_registryMutex);
- _registry[hostName] = server;
- }
+ const std::string hostName(server->getServerAddress());
+ fassert(16533, _registry.count(hostName) == 0);
- bool MockConnRegistry::removeServer(const std::string& hostName) {
- scoped_lock sl(_registryMutex);
- return _registry.erase(hostName) == 1;
- }
+ _registry[hostName] = server;
+}
- void MockConnRegistry::clear() {
- scoped_lock sl(_registryMutex);
- _registry.clear();
- }
+bool MockConnRegistry::removeServer(const std::string& hostName) {
+ scoped_lock sl(_registryMutex);
+ return _registry.erase(hostName) == 1;
+}
- MockDBClientConnection* MockConnRegistry::connect(const std::string& connStr) {
- scoped_lock sl(_registryMutex);
- fassert(16534, _registry.count(connStr) == 1);
- return new MockDBClientConnection(_registry[connStr], true);
- }
+void MockConnRegistry::clear() {
+ scoped_lock sl(_registryMutex);
+ _registry.clear();
+}
- MockConnRegistry::MockConnHook::MockConnHook(MockConnRegistry* registry):
- _registry(registry) {
- }
+MockDBClientConnection* MockConnRegistry::connect(const std::string& connStr) {
+ scoped_lock sl(_registryMutex);
+ fassert(16534, _registry.count(connStr) == 1);
+ return new MockDBClientConnection(_registry[connStr], true);
+}
- MockConnRegistry::MockConnHook::~MockConnHook() {
- }
+MockConnRegistry::MockConnHook::MockConnHook(MockConnRegistry* registry) : _registry(registry) {}
- mongo::DBClientBase* MockConnRegistry::MockConnHook::connect(
- const ConnectionString& connString,
- std::string& errmsg,
- double socketTimeout) {
- const string hostName(connString.toString());
- MockDBClientConnection* conn = _registry->connect(hostName);
+MockConnRegistry::MockConnHook::~MockConnHook() {}
- if (!conn->connect(hostName.c_str(), errmsg)) {
- // Assumption: connect never throws, so no leak.
- delete conn;
+mongo::DBClientBase* MockConnRegistry::MockConnHook::connect(const ConnectionString& connString,
+ std::string& errmsg,
+ double socketTimeout) {
+ const string hostName(connString.toString());
+ MockDBClientConnection* conn = _registry->connect(hostName);
- // mimic ConnectionString::connect for MASTER type connection to return NULL
- // if the destination is unreachable.
- return NULL;
- }
+ if (!conn->connect(hostName.c_str(), errmsg)) {
+ // Assumption: connect never throws, so no leak.
+ delete conn;
- return conn;
+ // mimic ConnectionString::connect for MASTER type connection to return NULL
+ // if the destination is unreachable.
+ return NULL;
}
+
+ return conn;
+}
}
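For orientation, a sketch of the typical test wiring for this registry, following the usage documented in mock_remote_db_server.h (the wrapper function is illustrative; installing the hook is one-time, process-wide setup):

#include "mongo/dbtests/mock/mock_conn_registry.h"
#include "mongo/dbtests/mock/mock_remote_db_server.h"

void wireUpMockServer() {
    using namespace mongo;

    // Route ConnectionString::connect() through the mock hook.
    ConnectionString::setConnectionHook(MockConnRegistry::get()->getConnStrHook());

    // Pseudo host names for mock servers start with '$' and must be unique.
    MockRemoteDBServer server("$a:27017");
    MockConnRegistry::get()->addServer(&server);

    // ... anything connecting to "$a:27017" now gets a MockDBClientConnection
    // backed by `server` ...

    // The registry does not own the server, so deregister it before it dies.
    MockConnRegistry::get()->removeServer(server.getServerAddress());
}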
diff --git a/src/mongo/dbtests/mock/mock_conn_registry.h b/src/mongo/dbtests/mock/mock_conn_registry.h
index f87196748ab..4e844ff4dc6 100644
--- a/src/mongo/dbtests/mock/mock_conn_registry.h
+++ b/src/mongo/dbtests/mock/mock_conn_registry.h
@@ -38,85 +38,85 @@
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
+/**
+ * Registry for storing mock servers; it can create mock connections to these
+ * servers.
+ */
+class MockConnRegistry {
+public:
/**
- * Registry for storing mock servers and can create mock connections to these
- * servers.
+ * Initializes the static instance.
*/
- class MockConnRegistry {
- public:
- /**
- * Initializes the static instance.
- */
- static Status init();
+ static Status init();
- /**
- * @return the singleton registry. If this needs to be called before main(),
- * then the initializer method should depend on "MockConnRegistry".
- */
- static MockConnRegistry* get();
+ /**
+ * @return the singleton registry. If this needs to be called before main(),
+ * then the initializer method should depend on "MockConnRegistry".
+ */
+ static MockConnRegistry* get();
- /**
- * Adds a server to this registry.
- *
- * @param server the server to add. Caller is responsible for disposing
- * the server.
- */
- void addServer(MockRemoteDBServer* server);
+ /**
+ * Adds a server to this registry.
+ *
+ * @param server the server to add. Caller is responsible for disposing
+ * the server.
+ */
+ void addServer(MockRemoteDBServer* server);
- /**
- * Removes the server from this registry.
- *
- * @param hostName the host name of the server to remove.
- *
- * @return true if the server is in the registry and was removed.
- */
- bool removeServer(const std::string& hostName);
+ /**
+ * Removes the server from this registry.
+ *
+ * @param hostName the host name of the server to remove.
+ *
+ * @return true if the server is in the registry and was removed.
+ */
+ bool removeServer(const std::string& hostName);
- /**
- * Clears the registry.
- */
- void clear();
+ /**
+ * Clears the registry.
+ */
+ void clear();
- /**
- * @return a new mocked connection to a server with the given hostName.
- */
- MockDBClientConnection* connect(const std::string& hostName);
+ /**
+ * @return a new mocked connection to a server with the given hostName.
+ */
+ MockDBClientConnection* connect(const std::string& hostName);
+
+ /**
+ * @return the hook that can be used with ConnectionString.
+ */
+ ConnectionString::ConnectionHook* getConnStrHook();
+private:
+ class MockConnHook : public ConnectionString::ConnectionHook {
+ public:
/**
- * @return the hook that can be used with ConnectionString.
+ * Creates a new connection hook for the ConnectionString class that
+ * can create mock connections to mock replica set members using their
+ * pseudo host names.
+ *
+ * @param registry the mock connection registry. Caller is responsible for managing
+ * registry and making sure that it lives longer than this object.
*/
- ConnectionString::ConnectionHook* getConnStrHook();
+ MockConnHook(MockConnRegistry* registry);
+ ~MockConnHook();
+
+ mongo::DBClientBase* connect(const mongo::ConnectionString& connString,
+ std::string& errmsg,
+ double socketTimeout);
private:
- class MockConnHook: public ConnectionString::ConnectionHook {
- public:
- /**
- * Creates a new connection hook for the ConnectionString class that
- * can create mock connections to mock replica set members using their
- * pseudo host names.
- *
- * @param replSet the mock replica set. Caller is responsible for managing
- * replSet and making sure that it lives longer than this object.
- */
- MockConnHook(MockConnRegistry* registry);
- ~MockConnHook();
-
- mongo::DBClientBase* connect(
- const mongo::ConnectionString& connString,
- std::string& errmsg, double socketTimeout);
-
- private:
- MockConnRegistry* _registry;
- };
-
- MockConnRegistry();
-
- static boost::scoped_ptr<MockConnRegistry> _instance;
-
- MockConnHook _mockConnStrHook;
-
- // protects _registry
- mongo::mutex _registryMutex;
- unordered_map<std::string, MockRemoteDBServer*> _registry;
+ MockConnRegistry* _registry;
};
+
+ MockConnRegistry();
+
+ static boost::scoped_ptr<MockConnRegistry> _instance;
+
+ MockConnHook _mockConnStrHook;
+
+ // protects _registry
+ mongo::mutex _registryMutex;
+ unordered_map<std::string, MockRemoteDBServer*> _registry;
+};
}
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
index 28a04779d25..480738b13c9 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
@@ -37,167 +37,166 @@ using std::string;
using std::vector;
namespace mongo {
- MockDBClientConnection::MockDBClientConnection(MockRemoteDBServer* remoteServer,
- bool autoReconnect):
- _remoteServerInstanceID(remoteServer->getInstanceID()),
- _remoteServer(remoteServer),
- _isFailed(false),
- _sockCreationTime(mongo::curTimeMicros64()),
- _autoReconnect(autoReconnect) {
- }
-
- MockDBClientConnection::~MockDBClientConnection() {
- }
+MockDBClientConnection::MockDBClientConnection(MockRemoteDBServer* remoteServer, bool autoReconnect)
+ : _remoteServerInstanceID(remoteServer->getInstanceID()),
+ _remoteServer(remoteServer),
+ _isFailed(false),
+ _sockCreationTime(mongo::curTimeMicros64()),
+ _autoReconnect(autoReconnect) {}
- bool MockDBClientConnection::connect(const char* hostName, std::string& errmsg) {
- if (_remoteServer->isRunning()) {
- _remoteServerInstanceID = _remoteServer->getInstanceID();
- return true;
- }
+MockDBClientConnection::~MockDBClientConnection() {}
- errmsg.assign("cannot connect to " + _remoteServer->getServerAddress());
- return false;
+bool MockDBClientConnection::connect(const char* hostName, std::string& errmsg) {
+ if (_remoteServer->isRunning()) {
+ _remoteServerInstanceID = _remoteServer->getInstanceID();
+ return true;
}
- bool MockDBClientConnection::runCommand(const string& dbname, const BSONObj& cmdObj,
- BSONObj &info, int options) {
- checkConnection();
+ errmsg.assign("cannot connect to " + _remoteServer->getServerAddress());
+ return false;
+}
- try {
- return _remoteServer->runCommand(_remoteServerInstanceID, dbname, cmdObj,
- info, options);
- }
- catch (const mongo::SocketException&) {
- _isFailed = true;
- throw;
- }
+bool MockDBClientConnection::runCommand(const string& dbname,
+ const BSONObj& cmdObj,
+ BSONObj& info,
+ int options) {
+ checkConnection();
- return false;
+ try {
+ return _remoteServer->runCommand(_remoteServerInstanceID, dbname, cmdObj, info, options);
+ } catch (const mongo::SocketException&) {
+ _isFailed = true;
+ throw;
}
- std::auto_ptr<mongo::DBClientCursor> MockDBClientConnection::query(const string& ns,
- mongo::Query query,
- int nToReturn,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize) {
- checkConnection();
-
- try {
- mongo::BSONArray result(_remoteServer->query(_remoteServerInstanceID, ns, query,
- nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize));
-
- std::auto_ptr<mongo::DBClientCursor> cursor;
- cursor.reset(new MockDBClientCursor(this, result));
- return cursor;
- }
- catch (const mongo::SocketException&) {
- _isFailed = true;
- throw;
- }
-
- std::auto_ptr<mongo::DBClientCursor> nullPtr;
- return nullPtr;
- }
+ return false;
+}
- mongo::ConnectionString::ConnectionType MockDBClientConnection::type() const {
- return mongo::ConnectionString::CUSTOM;
- }
+std::auto_ptr<mongo::DBClientCursor> MockDBClientConnection::query(const string& ns,
+ mongo::Query query,
+ int nToReturn,
+ int nToSkip,
+ const BSONObj* fieldsToReturn,
+ int queryOptions,
+ int batchSize) {
+ checkConnection();
+
+ try {
+ mongo::BSONArray result(_remoteServer->query(_remoteServerInstanceID,
+ ns,
+ query,
+ nToReturn,
+ nToSkip,
+ fieldsToReturn,
+ queryOptions,
+ batchSize));
+
+ std::auto_ptr<mongo::DBClientCursor> cursor;
+ cursor.reset(new MockDBClientCursor(this, result));
+ return cursor;
+ } catch (const mongo::SocketException&) {
+ _isFailed = true;
+ throw;
+ }
+
+ std::auto_ptr<mongo::DBClientCursor> nullPtr;
+ return nullPtr;
+}
- bool MockDBClientConnection::isFailed() const {
- return _isFailed;
- }
+mongo::ConnectionString::ConnectionType MockDBClientConnection::type() const {
+ return mongo::ConnectionString::CUSTOM;
+}
- string MockDBClientConnection::getServerAddress() const {
- return _remoteServer->getServerAddress();
- }
+bool MockDBClientConnection::isFailed() const {
+ return _isFailed;
+}
- string MockDBClientConnection::toString() const {
- return _remoteServer->toString();
- }
+string MockDBClientConnection::getServerAddress() const {
+ return _remoteServer->getServerAddress();
+}
- unsigned long long MockDBClientConnection::query(stdx::function<void(const BSONObj&)> f,
- const string& ns,
- mongo::Query query,
- const BSONObj* fieldsToReturn,
- int queryOptions) {
- verify(false);
- return 0;
- }
+string MockDBClientConnection::toString() const {
+ return _remoteServer->toString();
+}
- unsigned long long MockDBClientConnection::query(stdx::function<void(
- mongo::DBClientCursorBatchIterator&)> f,
- const std::string& ns,
- mongo::Query query,
- const mongo::BSONObj* fieldsToReturn,
- int queryOptions) {
- verify(false);
- return 0;
- }
+unsigned long long MockDBClientConnection::query(stdx::function<void(const BSONObj&)> f,
+ const string& ns,
+ mongo::Query query,
+ const BSONObj* fieldsToReturn,
+ int queryOptions) {
+ verify(false);
+ return 0;
+}
- uint64_t MockDBClientConnection::getSockCreationMicroSec() const {
- return _sockCreationTime;
- }
+unsigned long long MockDBClientConnection::query(
+ stdx::function<void(mongo::DBClientCursorBatchIterator&)> f,
+ const std::string& ns,
+ mongo::Query query,
+ const mongo::BSONObj* fieldsToReturn,
+ int queryOptions) {
+ verify(false);
+ return 0;
+}
- void MockDBClientConnection::insert(const string &ns, BSONObj obj, int flags) {
- _remoteServer->insert(ns, obj, flags);
- }
+uint64_t MockDBClientConnection::getSockCreationMicroSec() const {
+ return _sockCreationTime;
+}
- void MockDBClientConnection::insert(const string &ns,
- const vector<BSONObj>& objList,
- int flags) {
- for (vector<BSONObj>::const_iterator iter = objList.begin();
- iter != objList.end(); ++iter) {
- insert(ns, *iter, flags);
- }
- }
+void MockDBClientConnection::insert(const string& ns, BSONObj obj, int flags) {
+ _remoteServer->insert(ns, obj, flags);
+}
- void MockDBClientConnection::remove(const string& ns, Query query, bool justOne) {
- remove(ns, query, (justOne ? RemoveOption_JustOne : 0));
+void MockDBClientConnection::insert(const string& ns, const vector<BSONObj>& objList, int flags) {
+ for (vector<BSONObj>::const_iterator iter = objList.begin(); iter != objList.end(); ++iter) {
+ insert(ns, *iter, flags);
}
+}
- void MockDBClientConnection::remove(const string& ns, Query query, int flags) {
- _remoteServer->remove(ns, query, flags);
- }
+void MockDBClientConnection::remove(const string& ns, Query query, bool justOne) {
+ remove(ns, query, (justOne ? RemoveOption_JustOne : 0));
+}
- void MockDBClientConnection::killCursor(long long cursorID) {
- verify(false); // unimplemented
- }
+void MockDBClientConnection::remove(const string& ns, Query query, int flags) {
+ _remoteServer->remove(ns, query, flags);
+}
- bool MockDBClientConnection::callRead(mongo::Message& toSend , mongo::Message& response) {
- verify(false); // unimplemented
- return false;
- }
+void MockDBClientConnection::killCursor(long long cursorID) {
+ verify(false); // unimplemented
+}
- bool MockDBClientConnection::call(mongo::Message& toSend,
- mongo::Message& response,
- bool assertOk,
- string* actualServer) {
- verify(false); // unimplemented
- return false;
- }
+bool MockDBClientConnection::callRead(mongo::Message& toSend, mongo::Message& response) {
+ verify(false); // unimplemented
+ return false;
+}
- void MockDBClientConnection::say(mongo::Message& toSend, bool isRetry, string* actualServer) {
- verify(false); // unimplemented
- }
+bool MockDBClientConnection::call(mongo::Message& toSend,
+ mongo::Message& response,
+ bool assertOk,
+ string* actualServer) {
+ verify(false); // unimplemented
+ return false;
+}
- void MockDBClientConnection::sayPiggyBack(mongo::Message& toSend) {
- verify(false); // unimplemented
- }
+void MockDBClientConnection::say(mongo::Message& toSend, bool isRetry, string* actualServer) {
+ verify(false); // unimplemented
+}
- bool MockDBClientConnection::lazySupported() const {
- verify(false); // unimplemented
- return false;
- }
+void MockDBClientConnection::sayPiggyBack(mongo::Message& toSend) {
+ verify(false); // unimplemented
+}
- double MockDBClientConnection::getSoTimeout() const {
- return 0;
- }
+bool MockDBClientConnection::lazySupported() const {
+ verify(false); // unimplemented
+ return false;
+}
- void MockDBClientConnection::checkConnection() {
- if (_isFailed && _autoReconnect) {
- _remoteServerInstanceID = _remoteServer->getInstanceID();
- }
+double MockDBClientConnection::getSoTimeout() const {
+ return 0;
+}
+
+void MockDBClientConnection::checkConnection() {
+ if (_isFailed && _autoReconnect) {
+ _remoteServerInstanceID = _remoteServer->getInstanceID();
}
}
+}
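A sketch of how the failure-simulation pieces above compose in a test; every call appears in this file or its headers, while the wrapper function, host name, and canned command are illustrative:

#include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/dbtests/mock/mock_remote_db_server.h"

void simulateOutageAndRecovery() {
    using namespace mongo;

    MockRemoteDBServer server("test.server:27017");
    server.setCommandReply("ping", BSON("ok" << 1));

    MockDBClientConnection conn(&server, true /* autoReconnect */);
    std::string errmsg;
    verify(conn.connect(server.getServerAddress().c_str(), errmsg));

    BSONObj info;
    verify(conn.runCommand("admin", BSON("ping" << 1), info));  // canned reply, ok:1

    server.shutdown();
    try {
        conn.runCommand("admin", BSON("ping" << 1), info);
    } catch (const SocketException&) {
        // Expected: checkIfUp() sees a down server; conn is now marked failed.
    }

    server.reboot();  // bumps the instance ID
    // autoReconnect lets checkConnection() pick up the new instance ID, so the
    // same connection object works again without an explicit reconnect.
    verify(conn.runCommand("admin", BSON("ping" << 1), info));
}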
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h
index 8fa1a890c90..fb6112242a1 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.h
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h
@@ -35,98 +35,105 @@
#include "mongo/dbtests/mock/mock_remote_db_server.h"
namespace mongo {
+/**
+ * A simple class for mocking mongo::DBClientConnection.
+ *
+ * Also check out sample usage in dbtests/mock_dbclient_conn_test.cpp
+ */
+class MockDBClientConnection : public mongo::DBClientConnection {
+public:
/**
- * A simple class for mocking mongo::DBClientConnection.
+ * Create a mock connection to a mock server.
*
- * Also check out sample usage in dbtests/mock_dbclient_conn_test.cpp
+ * @param remoteServer the remote server to connect to. The caller is
+ * responsible for making sure that the life of remoteServer is
+ * longer than this connection.
+ * @param autoReconnect if true, automatically re-establishes the connection the
+ * next time an operation is requested after the last operation caused
+ * this connection to fall into a failed state.
*/
- class MockDBClientConnection : public mongo::DBClientConnection {
- public:
- /**
- * Create a mock connection to a mock server.
- *
- * @param remoteServer the remote server to connect to. The caller is
- * responsible for making sure that the life of remoteServer is
- * longer than this connection.
- * @param autoReconnect will automatically re-establish connection the
- * next time an operation is requested when the last operation caused
- * this connection to fall into a failed state.
- */
- MockDBClientConnection(MockRemoteDBServer* remoteServer, bool autoReconnect = false);
- virtual ~MockDBClientConnection();
-
- //
- // DBClientBase methods
- //
-
- bool connect(const char* hostName, std::string& errmsg);
-
- inline bool connect(const HostAndPort& host, std::string& errmsg) {
- return connect(host.toString().c_str(), errmsg);
- }
-
- bool runCommand(const std::string& dbname, const mongo::BSONObj& cmdObj,
- mongo::BSONObj &info, int options = 0);
-
- std::auto_ptr<mongo::DBClientCursor> query(const std::string &ns,
- mongo::Query query = mongo::Query(),
- int nToReturn = 0,
- int nToSkip = 0,
- const mongo::BSONObj* fieldsToReturn = 0,
- int queryOptions = 0,
- int batchSize = 0);
-
- uint64_t getSockCreationMicroSec() const;
-
- virtual void insert(const std::string& ns, BSONObj obj, int flags = 0);
-
- virtual void insert(const std::string& ns, const std::vector<BSONObj>& objList, int flags = 0);
-
- virtual void remove(const std::string& ns, Query query, bool justOne = false);
-
- virtual void remove(const std::string& ns, Query query, int flags = 0);
-
- //
- // Getters
- //
-
- mongo::ConnectionString::ConnectionType type() const;
- bool isFailed() const;
- double getSoTimeout() const;
- std::string getServerAddress() const;
- std::string toString() const;
-
- //
- // Unsupported methods (defined to get rid of virtual function was hidden error)
- //
- unsigned long long query(stdx::function<void(const mongo::BSONObj&)> f,
- const std::string& ns, mongo::Query query,
- const mongo::BSONObj* fieldsToReturn = 0, int queryOptions = 0);
-
- unsigned long long query(stdx::function<void(mongo::DBClientCursorBatchIterator&)> f,
- const std::string& ns, mongo::Query query,
- const mongo::BSONObj* fieldsToReturn = 0,
- int queryOptions = 0);
-
- //
- // Unsupported methods (these are pure virtuals in the base class)
- //
-
- void killCursor(long long cursorID);
- bool callRead(mongo::Message& toSend , mongo::Message& response);
- bool call(mongo::Message& toSend, mongo::Message& response, bool assertOk = true,
- std::string* actualServer = 0);
- void say(mongo::Message& toSend, bool isRetry = false, std::string* actualServer = 0);
- void sayPiggyBack(mongo::Message& toSend);
- bool lazySupported() const;
-
- private:
- void checkConnection();
-
- MockRemoteDBServer::InstanceID _remoteServerInstanceID;
- MockRemoteDBServer* _remoteServer;
- bool _isFailed;
- uint64_t _sockCreationTime;
- bool _autoReconnect;
- };
+ MockDBClientConnection(MockRemoteDBServer* remoteServer, bool autoReconnect = false);
+ virtual ~MockDBClientConnection();
+
+ //
+ // DBClientBase methods
+ //
+
+ bool connect(const char* hostName, std::string& errmsg);
+
+ inline bool connect(const HostAndPort& host, std::string& errmsg) {
+ return connect(host.toString().c_str(), errmsg);
+ }
+
+ bool runCommand(const std::string& dbname,
+ const mongo::BSONObj& cmdObj,
+ mongo::BSONObj& info,
+ int options = 0);
+
+ std::auto_ptr<mongo::DBClientCursor> query(const std::string& ns,
+ mongo::Query query = mongo::Query(),
+ int nToReturn = 0,
+ int nToSkip = 0,
+ const mongo::BSONObj* fieldsToReturn = 0,
+ int queryOptions = 0,
+ int batchSize = 0);
+
+ uint64_t getSockCreationMicroSec() const;
+
+ virtual void insert(const std::string& ns, BSONObj obj, int flags = 0);
+
+ virtual void insert(const std::string& ns, const std::vector<BSONObj>& objList, int flags = 0);
+
+ virtual void remove(const std::string& ns, Query query, bool justOne = false);
+
+ virtual void remove(const std::string& ns, Query query, int flags = 0);
+
+ //
+ // Getters
+ //
+
+ mongo::ConnectionString::ConnectionType type() const;
+ bool isFailed() const;
+ double getSoTimeout() const;
+ std::string getServerAddress() const;
+ std::string toString() const;
+
+ //
+ // Unsupported methods (defined to get rid of virtual function was hidden error)
+ //
+ unsigned long long query(stdx::function<void(const mongo::BSONObj&)> f,
+ const std::string& ns,
+ mongo::Query query,
+ const mongo::BSONObj* fieldsToReturn = 0,
+ int queryOptions = 0);
+
+ unsigned long long query(stdx::function<void(mongo::DBClientCursorBatchIterator&)> f,
+ const std::string& ns,
+ mongo::Query query,
+ const mongo::BSONObj* fieldsToReturn = 0,
+ int queryOptions = 0);
+
+ //
+ // Unsupported methods (these are pure virtuals in the base class)
+ //
+
+ void killCursor(long long cursorID);
+ bool callRead(mongo::Message& toSend, mongo::Message& response);
+ bool call(mongo::Message& toSend,
+ mongo::Message& response,
+ bool assertOk = true,
+ std::string* actualServer = 0);
+ void say(mongo::Message& toSend, bool isRetry = false, std::string* actualServer = 0);
+ void sayPiggyBack(mongo::Message& toSend);
+ bool lazySupported() const;
+
+private:
+ void checkConnection();
+
+ MockRemoteDBServer::InstanceID _remoteServerInstanceID;
+ MockRemoteDBServer* _remoteServer;
+ bool _isFailed;
+ uint64_t _sockCreationTime;
+ bool _autoReconnect;
+};
}
diff --git a/src/mongo/dbtests/mock/mock_dbclient_cursor.cpp b/src/mongo/dbtests/mock/mock_dbclient_cursor.cpp
index feab994cc7d..4105b866b02 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_cursor.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_cursor.cpp
@@ -30,18 +30,18 @@
#include "mongo/dbtests/mock/mock_dbclient_cursor.h"
namespace mongo {
- MockDBClientCursor::MockDBClientCursor(mongo::DBClientBase* client,
- const mongo::BSONArray& resultSet):
- mongo::DBClientCursor(client, "", 0, 0, 0) {
- _resultSet = resultSet.copy();
- _cursor.reset(new mongo::DBClientMockCursor(BSONArray(_resultSet)));
- }
+MockDBClientCursor::MockDBClientCursor(mongo::DBClientBase* client,
+ const mongo::BSONArray& resultSet)
+ : mongo::DBClientCursor(client, "", 0, 0, 0) {
+ _resultSet = resultSet.copy();
+ _cursor.reset(new mongo::DBClientMockCursor(BSONArray(_resultSet)));
+}
- bool MockDBClientCursor::more() {
- return _cursor->more();
- }
+bool MockDBClientCursor::more() {
+ return _cursor->more();
+}
- mongo::BSONObj MockDBClientCursor::next() {
- return _cursor->next();
- }
+mongo::BSONObj MockDBClientCursor::next() {
+ return _cursor->next();
+}
}
diff --git a/src/mongo/dbtests/mock/mock_dbclient_cursor.h b/src/mongo/dbtests/mock/mock_dbclient_cursor.h
index c8aa3a38bcf..ac59ca5c4e2 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_cursor.h
+++ b/src/mongo/dbtests/mock/mock_dbclient_cursor.h
@@ -36,25 +36,24 @@
namespace mongo {
+/**
+ * Simple adapter class from mongo::DBClientMockCursor to mongo::DBClientCursor.
+ * Only supports more and next; the behavior of other operations is undefined.
+ */
+class MockDBClientCursor : public mongo::DBClientCursor {
+public:
+ MockDBClientCursor(mongo::DBClientBase* client, const mongo::BSONArray& mockCollection);
+
+ bool more();
+
/**
- * Simple adapter class for mongo::DBClientMockCursor to mongo::DBClientCursor.
- * Only supports more and next, the behavior of other operations are undefined.
+ * Note: has the same contract as DBClientCursor - returned BSONObj will
+ * become invalid when this cursor is destroyed.
*/
- class MockDBClientCursor: public mongo::DBClientCursor {
- public:
- MockDBClientCursor(mongo::DBClientBase* client,
- const mongo::BSONArray& mockCollection);
-
- bool more();
-
- /**
- * Note: has the same contract as DBClientCursor - returned BSONObj will
- * become invalid when this cursor is destroyed.
- */
- mongo::BSONObj next();
-
- private:
- boost::scoped_ptr<mongo::DBClientMockCursor> _cursor;
- mongo::BSONObj _resultSet;
- };
+ mongo::BSONObj next();
+
+private:
+ boost::scoped_ptr<mongo::DBClientMockCursor> _cursor;
+ mongo::BSONObj _resultSet;
+};
}
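A sketch of driving the adapter directly; in real tests the cursor usually comes back from MockDBClientConnection::query(), and the BSON_ARRAY contents here are arbitrary:

#include "mongo/dbtests/mock/mock_dbclient_cursor.h"

void iterateMockResults(mongo::DBClientBase* client) {
    using namespace mongo;

    BSONArray results = BSON_ARRAY(BSON("x" << 1) << BSON("x" << 2));
    MockDBClientCursor cursor(client, results);

    while (cursor.more()) {
        BSONObj doc = cursor.next();  // valid only while the cursor is alive
        // ... assertions on doc ...
    }
}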
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
index e80d5a827f8..11b1dfdff5f 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
@@ -37,200 +37,195 @@ using std::vector;
namespace mongo {
- MockRemoteDBServer::CircularBSONIterator::CircularBSONIterator(
- const vector<BSONObj>& replyVector) {
- for (std::vector<mongo::BSONObj>::const_iterator iter = replyVector.begin();
- iter != replyVector.end(); ++iter) {
- _replyObjs.push_back(iter->copy());
- }
-
- _iter = _replyObjs.begin();
+MockRemoteDBServer::CircularBSONIterator::CircularBSONIterator(const vector<BSONObj>& replyVector) {
+ for (std::vector<mongo::BSONObj>::const_iterator iter = replyVector.begin();
+ iter != replyVector.end();
+ ++iter) {
+ _replyObjs.push_back(iter->copy());
}
- BSONObj MockRemoteDBServer::CircularBSONIterator::next() {
- verify(_iter != _replyObjs.end());
+ _iter = _replyObjs.begin();
+}
- BSONObj reply = _iter->copy();
- ++_iter;
+BSONObj MockRemoteDBServer::CircularBSONIterator::next() {
+ verify(_iter != _replyObjs.end());
- if (_iter == _replyObjs.end()) {
- _iter = _replyObjs.begin();
- }
+ BSONObj reply = _iter->copy();
+ ++_iter;
- return reply;
+ if (_iter == _replyObjs.end()) {
+ _iter = _replyObjs.begin();
}
- MockRemoteDBServer::MockRemoteDBServer(const string& hostAndPort):
- _isRunning(true),
- _hostAndPort(hostAndPort),
- _delayMilliSec(0),
- _cmdCount(0),
- _queryCount(0),
- _instanceID(0) {
- insert(IdentityNS, BSON(HostField(hostAndPort)), 0);
- }
+ return reply;
+}
- MockRemoteDBServer::~MockRemoteDBServer() {
- }
+MockRemoteDBServer::MockRemoteDBServer(const string& hostAndPort)
+ : _isRunning(true),
+ _hostAndPort(hostAndPort),
+ _delayMilliSec(0),
+ _cmdCount(0),
+ _queryCount(0),
+ _instanceID(0) {
+ insert(IdentityNS, BSON(HostField(hostAndPort)), 0);
+}
- void MockRemoteDBServer::setDelay(long long milliSec) {
- scoped_spinlock sLock(_lock);
- _delayMilliSec = milliSec;
- }
+MockRemoteDBServer::~MockRemoteDBServer() {}
- void MockRemoteDBServer::shutdown() {
- scoped_spinlock sLock(_lock);
- _isRunning = false;
- }
+void MockRemoteDBServer::setDelay(long long milliSec) {
+ scoped_spinlock sLock(_lock);
+ _delayMilliSec = milliSec;
+}
- void MockRemoteDBServer::reboot() {
- scoped_spinlock sLock(_lock);
- _isRunning = true;
- _instanceID++;
- }
+void MockRemoteDBServer::shutdown() {
+ scoped_spinlock sLock(_lock);
+ _isRunning = false;
+}
- MockRemoteDBServer::InstanceID MockRemoteDBServer::getInstanceID() const {
- scoped_spinlock sLock(_lock);
- return _instanceID;
- }
+void MockRemoteDBServer::reboot() {
+ scoped_spinlock sLock(_lock);
+ _isRunning = true;
+ _instanceID++;
+}
- bool MockRemoteDBServer::isRunning() const {
- scoped_spinlock sLock(_lock);
- return _isRunning;
- }
+MockRemoteDBServer::InstanceID MockRemoteDBServer::getInstanceID() const {
+ scoped_spinlock sLock(_lock);
+ return _instanceID;
+}
- void MockRemoteDBServer::setCommandReply(const string& cmdName,
- const mongo::BSONObj& replyObj) {
- vector<BSONObj> replySequence;
- replySequence.push_back(replyObj);
- setCommandReply(cmdName, replySequence);
- }
+bool MockRemoteDBServer::isRunning() const {
+ scoped_spinlock sLock(_lock);
+ return _isRunning;
+}
- void MockRemoteDBServer::setCommandReply(const string& cmdName,
- const vector<BSONObj>& replySequence) {
- scoped_spinlock sLock(_lock);
- _cmdMap[cmdName].reset(new CircularBSONIterator(replySequence));
- }
+void MockRemoteDBServer::setCommandReply(const string& cmdName, const mongo::BSONObj& replyObj) {
+ vector<BSONObj> replySequence;
+ replySequence.push_back(replyObj);
+ setCommandReply(cmdName, replySequence);
+}
- void MockRemoteDBServer::insert(const string &ns, BSONObj obj, int flags) {
- scoped_spinlock sLock(_lock);
+void MockRemoteDBServer::setCommandReply(const string& cmdName,
+ const vector<BSONObj>& replySequence) {
+ scoped_spinlock sLock(_lock);
+ _cmdMap[cmdName].reset(new CircularBSONIterator(replySequence));
+}
- vector<BSONObj>& mockCollection = _dataMgr[ns];
- mockCollection.push_back(obj.copy());
- }
+void MockRemoteDBServer::insert(const string& ns, BSONObj obj, int flags) {
+ scoped_spinlock sLock(_lock);
- void MockRemoteDBServer::remove(const string& ns, Query query, int flags) {
- scoped_spinlock sLock(_lock);
- if (_dataMgr.count(ns) == 0) {
- return;
- }
+ vector<BSONObj>& mockCollection = _dataMgr[ns];
+ mockCollection.push_back(obj.copy());
+}
- _dataMgr.erase(ns);
+void MockRemoteDBServer::remove(const string& ns, Query query, int flags) {
+ scoped_spinlock sLock(_lock);
+ if (_dataMgr.count(ns) == 0) {
+ return;
}
- bool MockRemoteDBServer::runCommand(MockRemoteDBServer::InstanceID id,
- const string& dbname,
- const BSONObj& cmdObj,
- BSONObj &info,
- int options) {
- checkIfUp(id);
-
- // Get the name of the command - copied from _runCommands @ db/dbcommands.cpp
- BSONObj innerCmdObj;
- {
- mongo::BSONElement e = cmdObj.firstElement();
- if (e.type() == mongo::Object && (e.fieldName()[0] == '$'
- ? mongo::str::equals("query", e.fieldName()+1) :
- mongo::str::equals("query", e.fieldName()))) {
- innerCmdObj = e.embeddedObject();
- }
- else {
- innerCmdObj = cmdObj;
- }
- }
-
- string cmdName = innerCmdObj.firstElement().fieldName();
- uassert(16430, str::stream() << "no reply for cmd: " << cmdName,
- _cmdMap.count(cmdName) == 1);
-
- {
- scoped_spinlock sLock(_lock);
- info = _cmdMap[cmdName]->next();
- }
+ _dataMgr.erase(ns);
+}
- if (_delayMilliSec > 0) {
- mongo::sleepmillis(_delayMilliSec);
+bool MockRemoteDBServer::runCommand(MockRemoteDBServer::InstanceID id,
+ const string& dbname,
+ const BSONObj& cmdObj,
+ BSONObj& info,
+ int options) {
+ checkIfUp(id);
+
+ // Get the name of the command - copied from _runCommands @ db/dbcommands.cpp
+ BSONObj innerCmdObj;
+ {
+ mongo::BSONElement e = cmdObj.firstElement();
+ if (e.type() == mongo::Object &&
+ (e.fieldName()[0] == '$' ? mongo::str::equals("query", e.fieldName() + 1)
+ : mongo::str::equals("query", e.fieldName()))) {
+ innerCmdObj = e.embeddedObject();
+ } else {
+ innerCmdObj = cmdObj;
}
+ }
- checkIfUp(id);
+ string cmdName = innerCmdObj.firstElement().fieldName();
+ uassert(16430, str::stream() << "no reply for cmd: " << cmdName, _cmdMap.count(cmdName) == 1);
+ {
scoped_spinlock sLock(_lock);
- _cmdCount++;
- return info["ok"].trueValue();
+ info = _cmdMap[cmdName]->next();
}
- mongo::BSONArray MockRemoteDBServer::query(
- MockRemoteDBServer::InstanceID id,
- const string& ns,
- mongo::Query query,
- int nToReturn,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize) {
- checkIfUp(id);
-
- if (_delayMilliSec > 0) {
- mongo::sleepmillis(_delayMilliSec);
- }
+ if (_delayMilliSec > 0) {
+ mongo::sleepmillis(_delayMilliSec);
+ }
- checkIfUp(id);
+ checkIfUp(id);
- scoped_spinlock sLock(_lock);
- _queryCount++;
+ scoped_spinlock sLock(_lock);
+ _cmdCount++;
+ return info["ok"].trueValue();
+}
- const vector<BSONObj>& coll = _dataMgr[ns];
- BSONArrayBuilder result;
- for (vector<BSONObj>::const_iterator iter = coll.begin(); iter != coll.end(); ++ iter) {
- result.append(iter->copy());
- }
+mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id,
+ const string& ns,
+ mongo::Query query,
+ int nToReturn,
+ int nToSkip,
+ const BSONObj* fieldsToReturn,
+ int queryOptions,
+ int batchSize) {
+ checkIfUp(id);
- return BSONArray(result.obj());
+ if (_delayMilliSec > 0) {
+ mongo::sleepmillis(_delayMilliSec);
}
- mongo::ConnectionString::ConnectionType MockRemoteDBServer::type() const {
- return mongo::ConnectionString::CUSTOM;
- }
+ checkIfUp(id);
- size_t MockRemoteDBServer::getCmdCount() const {
- scoped_spinlock sLock(_lock);
- return _cmdCount;
- }
+ scoped_spinlock sLock(_lock);
+ _queryCount++;
- size_t MockRemoteDBServer::getQueryCount() const {
- scoped_spinlock sLock(_lock);
- return _queryCount;
+ const vector<BSONObj>& coll = _dataMgr[ns];
+ BSONArrayBuilder result;
+ for (vector<BSONObj>::const_iterator iter = coll.begin(); iter != coll.end(); ++iter) {
+ result.append(iter->copy());
}
- void MockRemoteDBServer::clearCounters() {
- scoped_spinlock sLock(_lock);
- _cmdCount = 0;
- _queryCount = 0;
- }
+ return BSONArray(result.obj());
+}
- string MockRemoteDBServer::getServerAddress() const {
- return _hostAndPort;
- }
+mongo::ConnectionString::ConnectionType MockRemoteDBServer::type() const {
+ return mongo::ConnectionString::CUSTOM;
+}
- string MockRemoteDBServer::toString() {
- return _hostAndPort;
- }
+size_t MockRemoteDBServer::getCmdCount() const {
+ scoped_spinlock sLock(_lock);
+ return _cmdCount;
+}
- void MockRemoteDBServer::checkIfUp(InstanceID id) const {
- scoped_spinlock sLock(_lock);
+size_t MockRemoteDBServer::getQueryCount() const {
+ scoped_spinlock sLock(_lock);
+ return _queryCount;
+}
- if (!_isRunning || id < _instanceID) {
- throw mongo::SocketException(mongo::SocketException::CLOSED, _hostAndPort);
- }
+void MockRemoteDBServer::clearCounters() {
+ scoped_spinlock sLock(_lock);
+ _cmdCount = 0;
+ _queryCount = 0;
+}
+
+string MockRemoteDBServer::getServerAddress() const {
+ return _hostAndPort;
+}
+
+string MockRemoteDBServer::toString() {
+ return _hostAndPort;
+}
+
+void MockRemoteDBServer::checkIfUp(InstanceID id) const {
+ scoped_spinlock sLock(_lock);
+
+ if (!_isRunning || id < _instanceID) {
+ throw mongo::SocketException(mongo::SocketException::CLOSED, _hostAndPort);
}
}
+}
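The CircularBSONIterator above is what gives setCommandReply its cycling semantics: with a two-element reply sequence, successive runCommand calls alternate between the replies indefinitely. An illustrative use (the command name and reply contents are arbitrary):

#include <vector>
#include "mongo/dbtests/mock/mock_remote_db_server.h"

void cannedReplySequence() {
    using namespace mongo;

    MockRemoteDBServer server("$a:27017");

    std::vector<BSONObj> replies;
    replies.push_back(BSON("ok" << 1));
    replies.push_back(BSON("ok" << 0 << "errmsg"
                                 << "simulated failure"));
    server.setCommandReply("count", replies);

    BSONObj info;
    MockRemoteDBServer::InstanceID id = server.getInstanceID();
    verify(server.runCommand(id, "test", BSON("count" << "foo"), info));   // ok:1
    verify(!server.runCommand(id, "test", BSON("count" << "foo"), info));  // ok:0
    verify(server.runCommand(id, "test", BSON("count" << "foo"), info));   // wraps to ok:1
}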
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.h b/src/mongo/dbtests/mock/mock_remote_db_server.h
index 61074e4c857..6441e1c3f3c 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.h
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.h
@@ -39,203 +39,204 @@
namespace mongo {
- const std::string IdentityNS("local.me");
- const BSONField<std::string> HostField("host");
+const std::string IdentityNS("local.me");
+const BSONField<std::string> HostField("host");
+
+/**
+ * A very simple mock that acts like a database server. Every object keeps track of its own
+ * InstanceID, which initially starts at zero and increments every time it is restarted.
+ * This is primarily used to simulate situations in which old connections can no
+ * longer talk to sockets that have already been closed on this server.
+ *
+ * Note: All operations on this server are protected by a lock.
+ */
+class MockRemoteDBServer {
+public:
+ typedef size_t InstanceID;
/**
- * A very simple mock that acts like a database server. Every object keeps track of its own
- * InstanceID, which initially starts at zero and increments every time it is restarted.
- * This is primarily used for simulating the state of which old connections won't be able
- * to talk to the sockets that has already been closed on this server.
+ * Creates a new mock server. This can also be setup to work with the
+ * ConnectionString class by using mongo::MockConnRegistry as follows:
+ *
+ * ConnectionString::setConnectionHook(MockConnRegistry::get()->getConnStrHook());
+ * MockRemoteDBServer server("$a:27017");
+ * MockConnRegistry::get()->addServer(&server);
+ *
+ * This allows clients using the ConnectionString::connect interface to create
+ * connections to this server. The requirements to make this hook fully functional are:
*
- * Note: All operations on this server are protected by a lock.
+ * 1. hostAndPort of this server should start with $.
+ * 2. No other instance has the same hostAndPort as this.
+ *
+ * This server will also contain the hostAndPort inside the IdentityNS
+ * collection. This is convenient for testing query routing.
+ *
+ * @param hostAndPort the host name with port for this server.
+ *
+ * @see MockConnRegistry
*/
- class MockRemoteDBServer {
- public:
- typedef size_t InstanceID;
+ MockRemoteDBServer(const std::string& hostAndPort);
+ virtual ~MockRemoteDBServer();
- /**
- * Creates a new mock server. This can also be setup to work with the
- * ConnectionString class by using mongo::MockConnRegistry as follows:
- *
- * ConnectionString::setConnectionHook(MockConnRegistry::get()->getConnStrHook());
- * MockRemoteDBServer server("$a:27017");
- * MockConnRegistry::get()->addServer(&server);
- *
- * This allows clients using the ConnectionString::connect interface to create
- * connections to this server. The requirements to make this hook fully functional are:
- *
- * 1. hostAndPort of this server should start with $.
- * 2. No other instance has the same hostAndPort as this.
- *
- * This server will also contain the hostAndPort inside the IdentityNS
- * collection. This is convenient for testing query routing.
- *
- * @param hostAndPort the host name with port for this server.
- *
- * @see MockConnRegistry
- */
- MockRemoteDBServer(const std::string& hostAndPort);
- virtual ~MockRemoteDBServer();
+ //
+ // Connectivity methods
+ //
- //
- // Connectivity methods
- //
-
- /**
- * Set a delay for calls to query and runCommand
- */
- void setDelay(long long milliSec);
+ /**
+ * Set a delay for calls to query and runCommand
+ */
+ void setDelay(long long milliSec);
- /**
- * Shuts down this server. Any operations on this server with an InstanceID
- * less than or equal to the current one will throw a mongo::SocketException.
- * To bring the server up again, use the reboot method.
- */
- void shutdown();
+ /**
+ * Shuts down this server. Any operations on this server with an InstanceID
+ * less than or equal to the current one will throw a mongo::SocketException.
+ * To bring the server up again, use the reboot method.
+ */
+ void shutdown();
- /**
- * Increments the instanceID of this server.
- */
- void reboot();
+ /**
+ * Increments the instanceID of this server.
+ */
+ void reboot();
- /**
- * @return true if this server is running
- */
- bool isRunning() const;
+ /**
+ * @return true if this server is running
+ */
+ bool isRunning() const;
- //
- // Mocking methods
- //
+ //
+ // Mocking methods
+ //
- /**
- * Sets the reply for a command.
- *
- * @param cmdName the name of the command
- * @param replyObj the exact reply for the command
- */
- void setCommandReply(const std::string& cmdName,
- const mongo::BSONObj& replyObj);
+ /**
+ * Sets the reply for a command.
+ *
+ * @param cmdName the name of the command
+ * @param replyObj the exact reply for the command
+ */
+ void setCommandReply(const std::string& cmdName, const mongo::BSONObj& replyObj);
- /**
- * Sets the reply for a command.
- *
- * @param cmdName the name of the command.
- * @param replySequence the sequence of replies to cycle through every time
- * the given command is requested. This is useful for setting up a
- * sequence of response when the command can be called more than once
- * that requires different results when calling a method.
- */
- void setCommandReply(const std::string& cmdName,
- const std::vector<mongo::BSONObj>& replySequence);
+ /**
+ * Sets the reply for a command.
+ *
+ * @param cmdName the name of the command.
+ * @param replySequence the sequence of replies to cycle through every time
+ * the given command is requested. This is useful for setting up a
+     *      sequence of responses when the command can be called more than once
+     *      and requires different results on each call.
+ */
+ void setCommandReply(const std::string& cmdName,
+ const std::vector<mongo::BSONObj>& replySequence);
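A sketch of the cycling behavior this overload provides, given a MockRemoteDBServer named server (the CyclingCmd test later in this diff drives the same pattern):

    std::vector<mongo::BSONObj> replies;
    replies.push_back(BSON("set" << "a" << "isMaster" << true << "ok" << 1));
    replies.push_back(BSON("set" << "a" << "isMaster" << false << "ok" << 1));
    server.setCommandReply("isMaster", replies);
    // 1st isMaster call returns true, 2nd false, 3rd wraps around to true, ...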
- /**
- * Inserts a single document to this server.
- *
- * @param ns the namespace to insert the document to.
- * @param obj the document to insert.
- * @param flags ignored.
- */
- void insert(const std::string& ns, BSONObj obj, int flags = 0);
+ /**
+ * Inserts a single document to this server.
+ *
+ * @param ns the namespace to insert the document to.
+ * @param obj the document to insert.
+ * @param flags ignored.
+ */
+ void insert(const std::string& ns, BSONObj obj, int flags = 0);
- /**
- * Removes documents from this server.
- *
- * @param ns the namespace to remove documents from.
- * @param query ignored.
- * @param flags ignored.
- */
- void remove(const std::string& ns, Query query, int flags = 0);
-
- //
- // DBClientBase methods
- //
- bool runCommand(InstanceID id, const std::string& dbname,
- const mongo::BSONObj& cmdObj,
- mongo::BSONObj &info, int options = 0);
-
- mongo::BSONArray query(InstanceID id,
- const std::string &ns,
- mongo::Query query = mongo::Query(),
- int nToReturn = 0,
- int nToSkip = 0,
- const mongo::BSONObj* fieldsToReturn = 0,
- int queryOptions = 0,
- int batchSize = 0);
-
- //
- // Getters
- //
-
- InstanceID getInstanceID() const;
- mongo::ConnectionString::ConnectionType type() const;
- double getSoTimeout() const;
+ /**
+ * Removes documents from this server.
+ *
+ * @param ns the namespace to remove documents from.
+ * @param query ignored.
+ * @param flags ignored.
+ */
+ void remove(const std::string& ns, Query query, int flags = 0);
+
+ //
+ // DBClientBase methods
+ //
+ bool runCommand(InstanceID id,
+ const std::string& dbname,
+ const mongo::BSONObj& cmdObj,
+ mongo::BSONObj& info,
+ int options = 0);
+
+ mongo::BSONArray query(InstanceID id,
+ const std::string& ns,
+ mongo::Query query = mongo::Query(),
+ int nToReturn = 0,
+ int nToSkip = 0,
+ const mongo::BSONObj* fieldsToReturn = 0,
+ int queryOptions = 0,
+ int batchSize = 0);
+
+ //
+ // Getters
+ //
+
+ InstanceID getInstanceID() const;
+ mongo::ConnectionString::ConnectionType type() const;
+ double getSoTimeout() const;
- /**
- * @return the exact std::string address passed to hostAndPort parameter of the
- * constructor. In other words, doesn't automatically append a
- * 'default' port if none is specified.
- */
- std::string getServerAddress() const;
- std::string toString();
+ /**
+ * @return the exact std::string address passed to hostAndPort parameter of the
+ * constructor. In other words, doesn't automatically append a
+ * 'default' port if none is specified.
+ */
+ std::string getServerAddress() const;
+ std::string toString();
- //
- // Call counters
- //
+ //
+ // Call counters
+ //
- size_t getCmdCount() const;
- size_t getQueryCount() const;
- void clearCounters();
+ size_t getCmdCount() const;
+ size_t getQueryCount() const;
+ void clearCounters();
- private:
+private:
+ /**
+ * A very simple class for cycling through a set of BSONObj
+ */
+ class CircularBSONIterator {
+ public:
/**
- * A very simple class for cycling through a set of BSONObj
+ * Creates a new iterator with a deep copy of the vector.
*/
- class CircularBSONIterator {
- public:
- /**
- * Creates a new iterator with a deep copy of the vector.
- */
- CircularBSONIterator(const std::vector<mongo::BSONObj>& replyVector);
- mongo::BSONObj next();
-
- private:
- std::vector<mongo::BSONObj>::iterator _iter;
- std::vector<mongo::BSONObj> _replyObjs;
- };
+ CircularBSONIterator(const std::vector<mongo::BSONObj>& replyVector);
+ mongo::BSONObj next();
- /**
- * Checks whether the instance of the server is still up.
- *
- * @throws mongo::SocketException if this server is down
- */
- void checkIfUp(InstanceID id) const;
+ private:
+ std::vector<mongo::BSONObj>::iterator _iter;
+ std::vector<mongo::BSONObj> _replyObjs;
+ };
+
+ /**
+ * Checks whether the instance of the server is still up.
+ *
+ * @throws mongo::SocketException if this server is down
+ */
+ void checkIfUp(InstanceID id) const;
- typedef unordered_map<std::string, boost::shared_ptr<CircularBSONIterator> > CmdToReplyObj;
- typedef unordered_map<std::string, std::vector<BSONObj> > MockDataMgr;
+ typedef unordered_map<std::string, boost::shared_ptr<CircularBSONIterator>> CmdToReplyObj;
+ typedef unordered_map<std::string, std::vector<BSONObj>> MockDataMgr;
- bool _isRunning;
+ bool _isRunning;
- const std::string _hostAndPort;
- long long _delayMilliSec;
+ const std::string _hostAndPort;
+ long long _delayMilliSec;
- //
- // Mock replies
- //
- CmdToReplyObj _cmdMap;
- MockDataMgr _dataMgr;
+ //
+ // Mock replies
+ //
+ CmdToReplyObj _cmdMap;
+ MockDataMgr _dataMgr;
- //
- // Op Counters
- //
- size_t _cmdCount;
- size_t _queryCount;
+ //
+ // Op Counters
+ //
+ size_t _cmdCount;
+ size_t _queryCount;
- // Unique id for every restart of this server used for rejecting requests from
- // connections that are still "connected" to the old instance
- InstanceID _instanceID;
+    // Unique id for every restart of this server, used for rejecting requests
+    // from connections that are still "connected" to the old instance.
+ InstanceID _instanceID;
- // protects this entire instance
- mutable mongo::SpinLock _lock;
- };
+ // protects this entire instance
+ mutable mongo::SpinLock _lock;
+};
}
diff --git a/src/mongo/dbtests/mock/mock_replica_set.cpp b/src/mongo/dbtests/mock/mock_replica_set.cpp
index ea1ebde067a..afa44da2189 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.cpp
+++ b/src/mongo/dbtests/mock/mock_replica_set.cpp
@@ -38,287 +38,282 @@ using namespace mongo::repl;
namespace mongo {
- using std::string;
- using std::vector;
-
- MockReplicaSet::MockReplicaSet(const string& setName, size_t nodes):
- _setName(setName) {
- BSONObjBuilder configBuilder;
- configBuilder.append("_id", setName);
- configBuilder.append("version", 1);
-
- BSONArrayBuilder membersBuilder(configBuilder.subarrayStart("members"));
- for (size_t n = 0; n < nodes; n++) {
- std::stringstream str;
- str << "$" << setName << n << ":27017";
- const string hostName(str.str());
-
- if (n == 0) {
- _primaryHost = hostName;
- }
+using std::string;
+using std::vector;
- MockRemoteDBServer* mockServer = new MockRemoteDBServer(hostName);
- _nodeMap[hostName] = mockServer;
+MockReplicaSet::MockReplicaSet(const string& setName, size_t nodes) : _setName(setName) {
+ BSONObjBuilder configBuilder;
+ configBuilder.append("_id", setName);
+ configBuilder.append("version", 1);
- MockConnRegistry::get()->addServer(mockServer);
+ BSONArrayBuilder membersBuilder(configBuilder.subarrayStart("members"));
+ for (size_t n = 0; n < nodes; n++) {
+ std::stringstream str;
+ str << "$" << setName << n << ":27017";
+ const string hostName(str.str());
- membersBuilder.append(BSON("_id" << static_cast<int>(n) << "host" << hostName));
+ if (n == 0) {
+ _primaryHost = hostName;
}
- membersBuilder.done();
- ReplicaSetConfig replConfig;
- fassert(28566, replConfig.initialize(configBuilder.obj()));
- fassert(28573, replConfig.validate());
- setConfig(replConfig);
- }
+ MockRemoteDBServer* mockServer = new MockRemoteDBServer(hostName);
+ _nodeMap[hostName] = mockServer;
- MockReplicaSet::~MockReplicaSet() {
- for (ReplNodeMap::iterator iter = _nodeMap.begin();
- iter != _nodeMap.end(); ++iter) {
- MockConnRegistry::get()->removeServer(iter->second->getServerAddress());
- delete iter->second;
- }
+ MockConnRegistry::get()->addServer(mockServer);
+
+ membersBuilder.append(BSON("_id" << static_cast<int>(n) << "host" << hostName));
}
+ membersBuilder.done();
- string MockReplicaSet::getSetName() const {
- return _setName;
+ ReplicaSetConfig replConfig;
+ fassert(28566, replConfig.initialize(configBuilder.obj()));
+ fassert(28573, replConfig.validate());
+ setConfig(replConfig);
+}
+
+MockReplicaSet::~MockReplicaSet() {
+ for (ReplNodeMap::iterator iter = _nodeMap.begin(); iter != _nodeMap.end(); ++iter) {
+ MockConnRegistry::get()->removeServer(iter->second->getServerAddress());
+ delete iter->second;
}
+}
- string MockReplicaSet::getConnectionString() const {
- std::stringstream str;
- str << _setName;
- str << "/";
+string MockReplicaSet::getSetName() const {
+ return _setName;
+}
- ReplNodeMap::const_iterator iter = _nodeMap.begin();
- while (iter != _nodeMap.end()) {
- str << iter->second->getServerAddress();
- ++iter;
+string MockReplicaSet::getConnectionString() const {
+ std::stringstream str;
+ str << _setName;
+ str << "/";
- if (iter != _nodeMap.end()) {
- str << ",";
- }
- }
+ ReplNodeMap::const_iterator iter = _nodeMap.begin();
+ while (iter != _nodeMap.end()) {
+ str << iter->second->getServerAddress();
+ ++iter;
- return str.str();
+ if (iter != _nodeMap.end()) {
+ str << ",";
+ }
}
- vector<HostAndPort> MockReplicaSet::getHosts() const {
- vector<HostAndPort> list;
+ return str.str();
+}
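Worked example: for a three-node set named "n" this loop yields "n/$n0:27017,$n1:27017,$n2:27017", the exact string asserted by the ConnectionString test near the end of this diff.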
- for (ReplNodeMap::const_iterator iter = _nodeMap.begin();
- iter != _nodeMap.end(); ++iter) {
- list.push_back(HostAndPort(iter->second->getServerAddress()));
- }
+vector<HostAndPort> MockReplicaSet::getHosts() const {
+ vector<HostAndPort> list;
- return list;
+ for (ReplNodeMap::const_iterator iter = _nodeMap.begin(); iter != _nodeMap.end(); ++iter) {
+ list.push_back(HostAndPort(iter->second->getServerAddress()));
}
- string MockReplicaSet::getPrimary() const {
- return _primaryHost;
- }
+ return list;
+}
- void MockReplicaSet::setPrimary(const string& hostAndPort) {
- const MemberConfig* config = _replConfig.findMemberByHostAndPort(HostAndPort(hostAndPort));
- fassert(16578, config);
+string MockReplicaSet::getPrimary() const {
+ return _primaryHost;
+}
- fassert(16579, !config->isHidden() && config->getPriority() > 0 && !config->isArbiter());
+void MockReplicaSet::setPrimary(const string& hostAndPort) {
+ const MemberConfig* config = _replConfig.findMemberByHostAndPort(HostAndPort(hostAndPort));
+ fassert(16578, config);
- _primaryHost = hostAndPort;
+ fassert(16579, !config->isHidden() && config->getPriority() > 0 && !config->isArbiter());
- mockIsMasterCmd();
- mockReplSetGetStatusCmd();
- }
+ _primaryHost = hostAndPort;
- vector<string> MockReplicaSet::getSecondaries() const {
- vector<string> secondaries;
+ mockIsMasterCmd();
+ mockReplSetGetStatusCmd();
+}
- for (ReplicaSetConfig::MemberIterator member = _replConfig.membersBegin();
- member != _replConfig.membersEnd(); ++member) {
- if (member->getHostAndPort() != HostAndPort(_primaryHost)) {
- secondaries.push_back(member->getHostAndPort().toString());
- }
- }
+vector<string> MockReplicaSet::getSecondaries() const {
+ vector<string> secondaries;
- return secondaries;
+ for (ReplicaSetConfig::MemberIterator member = _replConfig.membersBegin();
+ member != _replConfig.membersEnd();
+ ++member) {
+ if (member->getHostAndPort() != HostAndPort(_primaryHost)) {
+ secondaries.push_back(member->getHostAndPort().toString());
+ }
}
- MockRemoteDBServer* MockReplicaSet::getNode(const string& hostAndPort) {
- return mapFindWithDefault(_nodeMap, hostAndPort, static_cast<MockRemoteDBServer*>(NULL));
- }
+ return secondaries;
+}
- repl::ReplicaSetConfig MockReplicaSet::getReplConfig() const {
- return _replConfig;
- }
+MockRemoteDBServer* MockReplicaSet::getNode(const string& hostAndPort) {
+ return mapFindWithDefault(_nodeMap, hostAndPort, static_cast<MockRemoteDBServer*>(NULL));
+}
- void MockReplicaSet::setConfig(const repl::ReplicaSetConfig& newConfig) {
- _replConfig = newConfig;
- mockIsMasterCmd();
- mockReplSetGetStatusCmd();
- }
+repl::ReplicaSetConfig MockReplicaSet::getReplConfig() const {
+ return _replConfig;
+}
- void MockReplicaSet::kill(const string& hostAndPort) {
- verify(_nodeMap.count(hostAndPort) == 1);
- _nodeMap[hostAndPort]->shutdown();
- }
+void MockReplicaSet::setConfig(const repl::ReplicaSetConfig& newConfig) {
+ _replConfig = newConfig;
+ mockIsMasterCmd();
+ mockReplSetGetStatusCmd();
+}
- void MockReplicaSet::kill(const vector<string>& hostList) {
- for (vector<string>::const_iterator iter = hostList.begin();
- iter != hostList.end(); ++iter) {
- kill(*iter);
- }
- }
+void MockReplicaSet::kill(const string& hostAndPort) {
+ verify(_nodeMap.count(hostAndPort) == 1);
+ _nodeMap[hostAndPort]->shutdown();
+}
- void MockReplicaSet::restore(const string& hostAndPort) {
- verify(_nodeMap.count(hostAndPort) == 1);
- _nodeMap[hostAndPort]->reboot();
+void MockReplicaSet::kill(const vector<string>& hostList) {
+ for (vector<string>::const_iterator iter = hostList.begin(); iter != hostList.end(); ++iter) {
+ kill(*iter);
}
+}
- void MockReplicaSet::mockIsMasterCmd() {
- for (ReplNodeMap::iterator nodeIter = _nodeMap.begin();
- nodeIter != _nodeMap.end(); ++nodeIter) {
- const string& hostAndPort = nodeIter->first;
+void MockReplicaSet::restore(const string& hostAndPort) {
+ verify(_nodeMap.count(hostAndPort) == 1);
+ _nodeMap[hostAndPort]->reboot();
+}
- BSONObjBuilder builder;
- builder.append("setName", _setName);
+void MockReplicaSet::mockIsMasterCmd() {
+ for (ReplNodeMap::iterator nodeIter = _nodeMap.begin(); nodeIter != _nodeMap.end();
+ ++nodeIter) {
+ const string& hostAndPort = nodeIter->first;
- const MemberConfig* member = _replConfig.findMemberByHostAndPort(
- HostAndPort(hostAndPort));
- if (!member) {
- builder.append("ismaster", false);
- builder.append("secondary", false);
+ BSONObjBuilder builder;
+ builder.append("setName", _setName);
+ const MemberConfig* member = _replConfig.findMemberByHostAndPort(HostAndPort(hostAndPort));
+ if (!member) {
+ builder.append("ismaster", false);
+ builder.append("secondary", false);
+
+ vector<string> hostList;
+ builder.append("hosts", hostList);
+ } else {
+ const bool isPrimary = hostAndPort == getPrimary();
+ builder.append("ismaster", isPrimary);
+ builder.append("secondary", !isPrimary);
+
+ {
+ // TODO: add passives & arbiters
vector<string> hostList;
- builder.append("hosts", hostList);
- }
- else {
- const bool isPrimary = hostAndPort == getPrimary();
- builder.append("ismaster", isPrimary);
- builder.append("secondary", !isPrimary);
-
- {
- // TODO: add passives & arbiters
- vector<string> hostList;
- hostList.push_back(getPrimary());
-
- const vector<string> secondaries = getSecondaries();
- for (vector<string>::const_iterator secIter = secondaries.begin();
- secIter != secondaries.end(); ++secIter) {
- hostList.push_back(*secIter);
- }
+ hostList.push_back(getPrimary());
- builder.append("hosts", hostList);
+ const vector<string> secondaries = getSecondaries();
+ for (vector<string>::const_iterator secIter = secondaries.begin();
+ secIter != secondaries.end();
+ ++secIter) {
+ hostList.push_back(*secIter);
}
- builder.append("primary", getPrimary());
+ builder.append("hosts", hostList);
+ }
- if (member->isArbiter()) {
- builder.append("arbiterOnly", true);
- }
+ builder.append("primary", getPrimary());
- if (member->getPriority() == 0 && !member->isArbiter()) {
- builder.append("passive", true);
- }
+ if (member->isArbiter()) {
+ builder.append("arbiterOnly", true);
+ }
- if (member->getSlaveDelay().total_seconds()) {
- builder.append("slaveDelay", member->getSlaveDelay().total_seconds());
- }
+ if (member->getPriority() == 0 && !member->isArbiter()) {
+ builder.append("passive", true);
+ }
- if (member->isHidden()) {
- builder.append("hidden", true);
- }
+ if (member->getSlaveDelay().total_seconds()) {
+ builder.append("slaveDelay", member->getSlaveDelay().total_seconds());
+ }
- if (!member->shouldBuildIndexes()) {
- builder.append("buildIndexes", false);
- }
+ if (member->isHidden()) {
+ builder.append("hidden", true);
+ }
+
+ if (!member->shouldBuildIndexes()) {
+ builder.append("buildIndexes", false);
+ }
- const ReplicaSetTagConfig tagConfig = _replConfig.getTagConfig();
- if (member->hasTags(tagConfig)) {
- BSONObjBuilder tagBuilder;
- for (MemberConfig::TagIterator tag = member->tagsBegin();
- tag != member->tagsEnd(); ++tag) {
- std::string tagKey = tagConfig.getTagKey(*tag);
- if (tagKey[0] == '$') {
- // Filter out internal tags
- continue;
- }
- tagBuilder.append(tagKey, tagConfig.getTagValue(*tag));
+ const ReplicaSetTagConfig tagConfig = _replConfig.getTagConfig();
+ if (member->hasTags(tagConfig)) {
+ BSONObjBuilder tagBuilder;
+ for (MemberConfig::TagIterator tag = member->tagsBegin(); tag != member->tagsEnd();
+ ++tag) {
+ std::string tagKey = tagConfig.getTagKey(*tag);
+ if (tagKey[0] == '$') {
+ // Filter out internal tags
+ continue;
}
- builder.append("tags", tagBuilder.done());
+ tagBuilder.append(tagKey, tagConfig.getTagValue(*tag));
}
+ builder.append("tags", tagBuilder.done());
}
+ }
- builder.append("me", hostAndPort);
- builder.append("ok", true);
+ builder.append("me", hostAndPort);
+ builder.append("ok", true);
- nodeIter->second->setCommandReply("ismaster", builder.done());
- }
+ nodeIter->second->setCommandReply("ismaster", builder.done());
}
+}
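Assembled, the mocked reply for the healthy primary of a three-node set "n" looks roughly like this (a sketch of the fields built above; arbiterOnly, passive, slaveDelay, hidden, buildIndexes, and tags are appended only when the member config calls for them):

    {
        setName: "n",
        ismaster: true,
        secondary: false,
        hosts: ["$n0:27017", "$n1:27017", "$n2:27017"],
        primary: "$n0:27017",
        me: "$n0:27017",
        ok: true
    }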
- int MockReplicaSet::getState(const std::string& hostAndPort) const {
- if (!_replConfig.findMemberByHostAndPort(HostAndPort(hostAndPort))) {
- return static_cast<int>(MemberState::RS_REMOVED);
- }
- else if (hostAndPort == getPrimary()) {
- return static_cast<int>(MemberState::RS_PRIMARY);
- }
- else {
- return static_cast<int>(MemberState::RS_SECONDARY);
- }
+int MockReplicaSet::getState(const std::string& hostAndPort) const {
+ if (!_replConfig.findMemberByHostAndPort(HostAndPort(hostAndPort))) {
+ return static_cast<int>(MemberState::RS_REMOVED);
+ } else if (hostAndPort == getPrimary()) {
+ return static_cast<int>(MemberState::RS_PRIMARY);
+ } else {
+ return static_cast<int>(MemberState::RS_SECONDARY);
}
+}
- void MockReplicaSet::mockReplSetGetStatusCmd() {
- // Copied from ReplSetImpl::_summarizeStatus
- for (ReplNodeMap::iterator nodeIter = _nodeMap.begin();
- nodeIter != _nodeMap.end(); ++nodeIter) {
- MockRemoteDBServer* node = nodeIter->second;
- vector<BSONObj> hostsField;
+void MockReplicaSet::mockReplSetGetStatusCmd() {
+ // Copied from ReplSetImpl::_summarizeStatus
+ for (ReplNodeMap::iterator nodeIter = _nodeMap.begin(); nodeIter != _nodeMap.end();
+ ++nodeIter) {
+ MockRemoteDBServer* node = nodeIter->second;
+ vector<BSONObj> hostsField;
- BSONObjBuilder fullStatBuilder;
+ BSONObjBuilder fullStatBuilder;
- {
- BSONObjBuilder selfStatBuilder;
- selfStatBuilder.append("name", node->getServerAddress());
- selfStatBuilder.append("health", 1.0);
- selfStatBuilder.append("state", getState(node->getServerAddress()));
+ {
+ BSONObjBuilder selfStatBuilder;
+ selfStatBuilder.append("name", node->getServerAddress());
+ selfStatBuilder.append("health", 1.0);
+ selfStatBuilder.append("state", getState(node->getServerAddress()));
- selfStatBuilder.append("self", true);
- // TODO: _id, stateStr, uptime, optime, optimeDate, maintenanceMode, errmsg
+ selfStatBuilder.append("self", true);
+ // TODO: _id, stateStr, uptime, optime, optimeDate, maintenanceMode, errmsg
- hostsField.push_back(selfStatBuilder.obj());
- }
+ hostsField.push_back(selfStatBuilder.obj());
+ }
- for (ReplicaSetConfig::MemberIterator member = _replConfig.membersBegin();
- member != _replConfig.membersEnd(); ++member) {
- MockRemoteDBServer* hostNode = getNode(member->getHostAndPort().toString());
+ for (ReplicaSetConfig::MemberIterator member = _replConfig.membersBegin();
+ member != _replConfig.membersEnd();
+ ++member) {
+ MockRemoteDBServer* hostNode = getNode(member->getHostAndPort().toString());
- if (hostNode == node) {
- continue;
- }
+ if (hostNode == node) {
+ continue;
+ }
- BSONObjBuilder hostMemberBuilder;
+ BSONObjBuilder hostMemberBuilder;
- // TODO: _id, stateStr, uptime, optime, optimeDate, lastHeartbeat, pingMs
- // errmsg, authenticated
+ // TODO: _id, stateStr, uptime, optime, optimeDate, lastHeartbeat, pingMs
+ // errmsg, authenticated
- hostMemberBuilder.append("name", hostNode->getServerAddress());
- const double health = hostNode->isRunning() ? 1.0 : 0.0;
- hostMemberBuilder.append("health", health);
- hostMemberBuilder.append("state", getState(hostNode->getServerAddress()));
+ hostMemberBuilder.append("name", hostNode->getServerAddress());
+ const double health = hostNode->isRunning() ? 1.0 : 0.0;
+ hostMemberBuilder.append("health", health);
+ hostMemberBuilder.append("state", getState(hostNode->getServerAddress()));
- hostsField.push_back(hostMemberBuilder.obj());
- }
+ hostsField.push_back(hostMemberBuilder.obj());
+ }
- sort(hostsField.begin(), hostsField.end());
+ sort(hostsField.begin(), hostsField.end());
- // TODO: syncingTo
+ // TODO: syncingTo
- fullStatBuilder.append("set", _setName);
- fullStatBuilder.appendTimeT("date", time(0));
- fullStatBuilder.append("myState", getState(node->getServerAddress()));
- fullStatBuilder.append("members", hostsField);
- fullStatBuilder.append("ok", true);
+ fullStatBuilder.append("set", _setName);
+ fullStatBuilder.appendTimeT("date", time(0));
+ fullStatBuilder.append("myState", getState(node->getServerAddress()));
+ fullStatBuilder.append("members", hostsField);
+ fullStatBuilder.append("ok", true);
- node->setCommandReply("replSetGetStatus", fullStatBuilder.done());
- }
+ node->setCommandReply("replSetGetStatus", fullStatBuilder.done());
}
}
+}
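For the same healthy three-node set viewed from the primary, the document built above comes out roughly as follows (a sketch; member states follow MemberState, 1 for PRIMARY and 2 for SECONDARY, and the members array is sorted):

    {
        set: "n",
        date: <current time>,
        myState: 1,
        members: [
            { name: "$n0:27017", health: 1.0, state: 1, self: true },
            { name: "$n1:27017", health: 1.0, state: 2 },
            { name: "$n2:27017", health: 1.0, state: 2 }
        ],
        ok: true
    }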
diff --git a/src/mongo/dbtests/mock/mock_replica_set.h b/src/mongo/dbtests/mock/mock_replica_set.h
index 9c394c97648..c3f22c3cb97 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.h
+++ b/src/mongo/dbtests/mock/mock_replica_set.h
@@ -36,106 +36,105 @@
#include <vector>
namespace mongo {
+/**
+ * This is a helper class for managing a replica set consisting of
+ * MockRemoteDBServer instances.
+ *
+ * Note: Be sure to call ScopedDbConnection::clearPool() after every test
+ * that involves the ReplicaSetMonitor. This is because it uses
+ * ScopedDbConnection, which means residual connections created by previous
+ * tests can linger and cause a seg fault if the MockRemoteDBServer
+ * instances were already destroyed.
+ *
+ * Warning: Not thread-safe
+ */
+class MockReplicaSet {
+public:
+ /**
+ * Creates a mock replica set and automatically mocks the isMaster
+ * and replSetGetStatus commands based on the default replica set
+ * configuration.
+ *
+ * @param setName The name for this replica set
+ * @param nodes The initial number of nodes for this replica set
+ */
+ MockReplicaSet(const std::string& setName, size_t nodes);
+ ~MockReplicaSet();
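A minimal usage sketch for this class; the set name "n" is illustrative, and the tests at the end of this diff make the same calls:

    MockReplicaSet replSet("n", 3);  // creates $n0:27017 (primary), $n1, $n2

    ASSERT_EQUALS("n/$n0:27017,$n1:27017,$n2:27017", replSet.getConnectionString());
    ASSERT_EQUALS("$n0:27017", replSet.getPrimary());

    replSet.kill("$n1:27017");       // now reports health 0.0 in replSetGetStatus
    replSet.restore("$n1:27017");    // healthy again

    // Per the note above, clear pooled connections before the mock
    // servers are destroyed when the ReplicaSetMonitor is involved.
    ScopedDbConnection::clearPool();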
+
+ //
+ // getters
+ //
+
+ std::string getSetName() const;
+ std::string getConnectionString() const;
+ std::vector<HostAndPort> getHosts() const;
+ repl::ReplicaSetConfig getReplConfig() const;
+ std::string getPrimary() const;
+ std::vector<std::string> getSecondaries() const;
+
+ /**
+     * Sets the configuration for this replica set. This also has the side effect
+ * of mocking the ismaster and replSetGetStatus command responses based on
+ * the new config.
+ *
+ * Note: does not automatically select a new primary. Can be done manually by
+ * calling setPrimary.
+ */
+ void setConfig(const repl::ReplicaSetConfig& newConfig);
+
+ void setPrimary(const std::string& hostAndPort);
+
+ /**
+ * @return pointer to the mocked remote server with the given hostName.
+     *     NULL if the host doesn't exist.
+ */
+ MockRemoteDBServer* getNode(const std::string& hostAndPort);
+
+ /**
+ * Kills a node belonging to this set.
+ *
+ * @param hostName the name of the replica node to kill.
+ */
+ void kill(const std::string& hostAndPort);
+
/**
- * This is a helper class for managing a replica set consisting of
- * MockRemoteDBServer instances.
* Kills a set of hosts belonging to this set.
*
- * Note: Be sure to call ScopedDbConnection::clearPool() after every test
- * when doing tests that involves the ReplicaSetMonitor. This is because
- * it uses ScopedDbConnection which means you can have a residue connections
- * that was created from previous tests and can cause a seg fault if the
- * MockRemoteDBServer instances were already destroyed.
+ * @param hostList the list of host names of the servers to kill.
+ */
+ void kill(const std::vector<std::string>& hostList);
+
+ /**
+ * Reboots a node.
*
- * Warning: Not thread-safe
+ * @param hostName the name of the host to reboot.
+ */
+ void restore(const std::string& hostName);
+
+private:
+ typedef std::map<std::string, MockRemoteDBServer*> ReplNodeMap;
+
+ /**
+ * Mocks the ismaster command based on the information on the current
+ * replica set configuration.
*/
- class MockReplicaSet {
- public:
-
- /**
- * Creates a mock replica set and automatically mocks the isMaster
- * and replSetGetStatus commands based on the default replica set
- * configuration.
- *
- * @param setName The name for this replica set
- * @param nodes The initial number of nodes for this replica set
- */
- MockReplicaSet(const std::string& setName, size_t nodes);
- ~MockReplicaSet();
-
- //
- // getters
- //
-
- std::string getSetName() const;
- std::string getConnectionString() const;
- std::vector<HostAndPort> getHosts() const;
- repl::ReplicaSetConfig getReplConfig() const;
- std::string getPrimary() const;
- std::vector<std::string> getSecondaries() const;
-
- /**
- * Sets the configuration for this replica sets. This also has a side effect
- * of mocking the ismaster and replSetGetStatus command responses based on
- * the new config.
- *
- * Note: does not automatically select a new primary. Can be done manually by
- * calling setPrimary.
- */
- void setConfig(const repl::ReplicaSetConfig& newConfig);
-
- void setPrimary(const std::string& hostAndPort);
-
- /**
- * @return pointer to the mocked remote server with the given hostName.
- * NULL if host doesn't exists.
- */
- MockRemoteDBServer* getNode(const std::string& hostAndPort);
-
- /**
- * Kills a node belonging to this set.
- *
- * @param hostName the name of the replica node to kill.
- */
- void kill(const std::string& hostAndPort);
-
- /**
- * Kills a set of host belonging to this set.
- *
- * @param hostList the list of host names of the servers to kill.
- */
- void kill(const std::vector<std::string>& hostList);
-
- /**
- * Reboots a node.
- *
- * @param hostName the name of the host to reboot.
- */
- void restore(const std::string& hostName);
-
- private:
- typedef std::map<std::string, MockRemoteDBServer*> ReplNodeMap;
-
- /**
- * Mocks the ismaster command based on the information on the current
- * replica set configuration.
- */
- void mockIsMasterCmd();
-
- /**
- * Mocks the replSetGetStatus command based on the current states of the
- * mocked servers.
- */
- void mockReplSetGetStatusCmd();
-
- /**
- * @return the replica set state of the given host
- */
- int getState(const std::string& hostAndPort) const;
-
- const std::string _setName;
- ReplNodeMap _nodeMap;
- repl::ReplicaSetConfig _replConfig;
-
- std::string _primaryHost;
- };
+ void mockIsMasterCmd();
+
+ /**
+ * Mocks the replSetGetStatus command based on the current states of the
+ * mocked servers.
+ */
+ void mockReplSetGetStatusCmd();
+
+ /**
+ * @return the replica set state of the given host
+ */
+ int getState(const std::string& hostAndPort) const;
+
+ const std::string _setName;
+ ReplNodeMap _nodeMap;
+ repl::ReplicaSetConfig _replConfig;
+
+ std::string _primaryHost;
+};
}
diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
index 09a88ea316a..65672657f23 100644
--- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp
+++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
@@ -51,554 +51,573 @@ using std::vector;
namespace mongo_test {
- TEST(MockDBClientConnTest, ServerAddress) {
- MockRemoteDBServer server("test");
+TEST(MockDBClientConnTest, ServerAddress) {
+ MockRemoteDBServer server("test");
+ MockDBClientConnection conn(&server);
+
+ ASSERT_EQUALS("test", conn.getServerAddress());
+ ASSERT_EQUALS("test", conn.toString());
+}
+
+TEST(MockDBClientConnTest, QueryCount) {
+ MockRemoteDBServer server("test");
+
+ {
MockDBClientConnection conn(&server);
- ASSERT_EQUALS("test", conn.getServerAddress());
- ASSERT_EQUALS("test", conn.toString());
+ ASSERT_EQUALS(0U, server.getQueryCount());
+ conn.query("foo.bar");
}
- TEST(MockDBClientConnTest, QueryCount) {
- MockRemoteDBServer server("test");
+ ASSERT_EQUALS(1U, server.getQueryCount());
- {
- MockDBClientConnection conn(&server);
+ {
+ MockDBClientConnection conn(&server);
+ conn.query("foo.bar");
+ ASSERT_EQUALS(2U, server.getQueryCount());
+ }
+}
- ASSERT_EQUALS(0U, server.getQueryCount());
- conn.query("foo.bar");
- }
+TEST(MockDBClientConnTest, InsertAndQuery) {
+ MockRemoteDBServer server("test");
+ const string ns("test.user");
- ASSERT_EQUALS(1U, server.getQueryCount());
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
+ ASSERT(!cursor->more());
- {
- MockDBClientConnection conn(&server);
- conn.query("foo.bar");
- ASSERT_EQUALS(2U, server.getQueryCount());
- }
+ server.insert(ns, BSON("x" << 1));
+ server.insert(ns, BSON("y" << 2));
}
- TEST(MockDBClientConnTest, InsertAndQuery) {
- MockRemoteDBServer server("test");
- const string ns("test.user");
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
- ASSERT(!cursor->more());
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS(1, firstDoc["x"].numberInt());
- server.insert(ns, BSON("x" << 1));
- server.insert(ns, BSON("y" << 2));
- }
+ ASSERT(cursor->more());
+ BSONObj secondDoc = cursor->next();
+ ASSERT_EQUALS(2, secondDoc["y"].numberInt());
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
+ ASSERT(!cursor->more());
+ }
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(1, firstDoc["x"].numberInt());
+ // Make sure that repeated calls will still give you the same result
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
- ASSERT(cursor->more());
- BSONObj secondDoc = cursor->next();
- ASSERT_EQUALS(2, secondDoc["y"].numberInt());
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS(1, firstDoc["x"].numberInt());
- ASSERT(!cursor->more());
- }
+ ASSERT(cursor->more());
+ BSONObj secondDoc = cursor->next();
+ ASSERT_EQUALS(2, secondDoc["y"].numberInt());
- // Make sure that repeated calls will still give you the same result
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
+ ASSERT(!cursor->more());
+ }
+}
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(1, firstDoc["x"].numberInt());
+TEST(MockDBClientConnTest, InsertAndQueryTwice) {
+ MockRemoteDBServer server("test");
+ const string ns("test.user");
- ASSERT(cursor->more());
- BSONObj secondDoc = cursor->next();
- ASSERT_EQUALS(2, secondDoc["y"].numberInt());
+ server.insert(ns, BSON("x" << 1));
- ASSERT(!cursor->more());
- }
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
+
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS(1, firstDoc["x"].numberInt());
}
- TEST(MockDBClientConnTest, InsertAndQueryTwice) {
- MockRemoteDBServer server("test");
- const string ns("test.user");
+ server.insert(ns, BSON("y" << 2));
- server.insert(ns, BSON("x" << 1));
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS(1, firstDoc["x"].numberInt());
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(1, firstDoc["x"].numberInt());
- }
+ ASSERT(cursor->more());
+ BSONObj secondDoc = cursor->next();
+ ASSERT_EQUALS(2, secondDoc["y"].numberInt());
- server.insert(ns, BSON("y" << 2));
+ ASSERT(!cursor->more());
+ }
+}
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
+TEST(MockDBClientConnTest, QueryWithNoResults) {
+ MockRemoteDBServer server("test");
+ const string ns("test.user");
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(1, firstDoc["x"].numberInt());
+ server.insert(ns, BSON("x" << 1));
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query("other.ns");
- ASSERT(cursor->more());
- BSONObj secondDoc = cursor->next();
- ASSERT_EQUALS(2, secondDoc["y"].numberInt());
+ ASSERT(!cursor->more());
+}
- ASSERT(!cursor->more());
- }
- }
+TEST(MockDBClientConnTest, MultiNSInsertAndQuery) {
+ MockRemoteDBServer server("test");
+ const string ns1("test.user");
+ const string ns2("foo.bar");
+ const string ns3("mongo.db");
- TEST(MockDBClientConnTest, QueryWithNoResults) {
- MockRemoteDBServer server("test");
- const string ns("test.user");
+ {
+ MockDBClientConnection conn(&server);
+ conn.insert(ns1, BSON("a" << 1));
+ conn.insert(ns2,
+ BSON("ef"
+ << "gh"));
+ conn.insert(ns3, BSON("x" << 2));
+
+ conn.insert(ns1, BSON("b" << 3));
+ conn.insert(ns2,
+ BSON("jk"
+ << "lm"));
+
+ conn.insert(ns2,
+ BSON("x"
+ << "yz"));
+ }
- server.insert(ns, BSON("x" << 1));
+ {
MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query("other.ns");
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns1);
+
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS(1, firstDoc["a"].numberInt());
+
+ ASSERT(cursor->more());
+ BSONObj secondDoc = cursor->next();
+ ASSERT_EQUALS(3, secondDoc["b"].numberInt());
ASSERT(!cursor->more());
}
- TEST(MockDBClientConnTest, MultiNSInsertAndQuery) {
- MockRemoteDBServer server("test");
- const string ns1("test.user");
- const string ns2("foo.bar");
- const string ns3("mongo.db");
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns2);
- {
- MockDBClientConnection conn(&server);
- conn.insert(ns1, BSON("a" << 1));
- conn.insert(ns2, BSON("ef" << "gh"));
- conn.insert(ns3, BSON("x" << 2));
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS("gh", firstDoc["ef"].String());
- conn.insert(ns1, BSON("b" << 3));
- conn.insert(ns2, BSON("jk" << "lm"));
+ ASSERT(cursor->more());
+ BSONObj secondDoc = cursor->next();
+ ASSERT_EQUALS("lm", secondDoc["jk"].String());
- conn.insert(ns2, BSON("x" << "yz"));
- }
+ ASSERT(cursor->more());
+ BSONObj thirdDoc = cursor->next();
+ ASSERT_EQUALS("yz", thirdDoc["x"].String());
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns1);
+ ASSERT(!cursor->more());
+ }
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(1, firstDoc["a"].numberInt());
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns3);
- ASSERT(cursor->more());
- BSONObj secondDoc = cursor->next();
- ASSERT_EQUALS(3, secondDoc["b"].numberInt());
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS(2, firstDoc["x"].numberInt());
- ASSERT(!cursor->more());
- }
+ ASSERT(!cursor->more());
+ }
+}
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns2);
+TEST(MockDBClientConnTest, SimpleRemove) {
+ MockRemoteDBServer server("test");
+ const string ns("test.user");
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS("gh", firstDoc["ef"].String());
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
+ ASSERT(!cursor->more());
- ASSERT(cursor->more());
- BSONObj secondDoc = cursor->next();
- ASSERT_EQUALS("lm", secondDoc["jk"].String());
+ conn.insert(ns, BSON("x" << 1));
+ conn.insert(ns, BSON("y" << 1));
+ }
- ASSERT(cursor->more());
- BSONObj thirdDoc = cursor->next();
- ASSERT_EQUALS("yz", thirdDoc["x"].String());
+ {
+ MockDBClientConnection conn(&server);
+ conn.remove(ns, Query(), false);
+ }
- ASSERT(!cursor->more());
- }
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns3);
+ ASSERT(!cursor->more());
+ }
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(2, firstDoc["x"].numberInt());
+ // Make sure that repeated calls will still give you the same result
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
- ASSERT(!cursor->more());
- }
+ ASSERT(!cursor->more());
}
+}
- TEST(MockDBClientConnTest, SimpleRemove) {
- MockRemoteDBServer server("test");
- const string ns("test.user");
+TEST(MockDBClientConnTest, MultiNSRemove) {
+ MockRemoteDBServer server("test");
+ const string ns1("test.user");
+ const string ns2("foo.bar");
+ const string ns3("mongo.db");
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
- ASSERT(!cursor->more());
+ {
+ MockDBClientConnection conn(&server);
+ conn.insert(ns1, BSON("a" << 1));
+ conn.insert(ns2,
+ BSON("ef"
+ << "gh"));
+ conn.insert(ns3, BSON("x" << 2));
+
+ conn.insert(ns1, BSON("b" << 3));
+ conn.insert(ns2,
+ BSON("jk"
+ << "lm"));
+
+ conn.insert(ns2,
+ BSON("x"
+ << "yz"));
+ }
- conn.insert(ns, BSON("x" << 1));
- conn.insert(ns, BSON("y" << 1));
- }
+ {
+ MockDBClientConnection conn(&server);
+ conn.remove(ns2, Query(), false);
- {
- MockDBClientConnection conn(&server);
- conn.remove(ns, Query(), false);
- }
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns2);
+ ASSERT(!cursor->more());
+ }
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns1);
- ASSERT(!cursor->more());
- }
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS(1, firstDoc["a"].numberInt());
- // Make sure that repeated calls will still give you the same result
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
+ ASSERT(cursor->more());
+ BSONObj secondDoc = cursor->next();
+ ASSERT_EQUALS(3, secondDoc["b"].numberInt());
- ASSERT(!cursor->more());
- }
+ ASSERT(!cursor->more());
}
- TEST(MockDBClientConnTest, MultiNSRemove) {
- MockRemoteDBServer server("test");
- const string ns1("test.user");
- const string ns2("foo.bar");
- const string ns3("mongo.db");
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns3);
- {
- MockDBClientConnection conn(&server);
- conn.insert(ns1, BSON("a" << 1));
- conn.insert(ns2, BSON("ef" << "gh"));
- conn.insert(ns3, BSON("x" << 2));
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS(2, firstDoc["x"].numberInt());
- conn.insert(ns1, BSON("b" << 3));
- conn.insert(ns2, BSON("jk" << "lm"));
+ ASSERT(!cursor->more());
+ }
+}
+
+TEST(MockDBClientConnTest, InsertAfterRemove) {
+ MockRemoteDBServer server("test");
+ const string ns("test.user");
- conn.insert(ns2, BSON("x" << "yz"));
- }
+ {
+ MockDBClientConnection conn(&server);
+ conn.insert(ns, BSON("a" << 1));
+ conn.insert(ns, BSON("b" << 3));
+ conn.insert(ns,
+ BSON("x"
+ << "yz"));
+ }
- {
- MockDBClientConnection conn(&server);
- conn.remove(ns2, Query(), false);
+ {
+ MockDBClientConnection conn(&server);
+ conn.remove(ns, Query(), false);
+ }
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns2);
- ASSERT(!cursor->more());
- }
+ {
+ MockDBClientConnection conn(&server);
+ conn.insert(ns, BSON("x" << 100));
+ }
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns1);
+ {
+ MockDBClientConnection conn(&server);
+ std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(1, firstDoc["a"].numberInt());
+ ASSERT(cursor->more());
+ BSONObj firstDoc = cursor->next();
+ ASSERT_EQUALS(100, firstDoc["x"].numberInt());
- ASSERT(cursor->more());
- BSONObj secondDoc = cursor->next();
- ASSERT_EQUALS(3, secondDoc["b"].numberInt());
+ ASSERT(!cursor->more());
+ }
+}
- ASSERT(!cursor->more());
- }
-
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns3);
-
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(2, firstDoc["x"].numberInt());
-
- ASSERT(!cursor->more());
- }
- }
-
- TEST(MockDBClientConnTest, InsertAfterRemove) {
- MockRemoteDBServer server("test");
- const string ns("test.user");
-
- {
- MockDBClientConnection conn(&server);
- conn.insert(ns, BSON("a" << 1));
- conn.insert(ns, BSON("b" << 3));
- conn.insert(ns, BSON("x" << "yz"));
- }
-
- {
- MockDBClientConnection conn(&server);
- conn.remove(ns, Query(), false);
- }
-
- {
- MockDBClientConnection conn(&server);
- conn.insert(ns, BSON("x" << 100));
- }
-
- {
- MockDBClientConnection conn(&server);
- std::auto_ptr<mongo::DBClientCursor> cursor = conn.query(ns);
-
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(100, firstDoc["x"].numberInt());
-
- ASSERT(!cursor->more());
- }
- }
-
- TEST(MockDBClientConnTest, SetCmdReply) {
- MockRemoteDBServer server("test");
- server.setCommandReply("serverStatus", BSON("ok" << 1 << "host" << "local"));
-
- {
- MockDBClientConnection conn(&server);
- BSONObj response;
- ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response));
- ASSERT_EQUALS(1, response["ok"].numberInt());
- ASSERT_EQUALS("local", response["host"].str());
-
- ASSERT_EQUALS(1U, server.getCmdCount());
- }
-
- // Make sure that repeated calls will still give you the same result
- {
- MockDBClientConnection conn(&server);
- BSONObj response;
- ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response));
- ASSERT_EQUALS(1, response["ok"].numberInt());
- ASSERT_EQUALS("local", response["host"].str());
-
- ASSERT_EQUALS(2U, server.getCmdCount());
- }
-
- {
- MockDBClientConnection conn(&server);
- BSONObj response;
- ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response));
- ASSERT_EQUALS(1, response["ok"].numberInt());
- ASSERT_EQUALS("local", response["host"].str());
-
- ASSERT_EQUALS(3U, server.getCmdCount());
- }
- }
-
- TEST(MockDBClientConnTest, CyclingCmd) {
- MockRemoteDBServer server("test");
-
- {
- vector<BSONObj> isMasterSequence;
- isMasterSequence.push_back(BSON("set" << "a"
- << "isMaster" << true
- << "ok" << 1));
- isMasterSequence.push_back(BSON("set" << "a"
- << "isMaster" << false
- << "ok" << 1));
- server.setCommandReply("isMaster", isMasterSequence);
- }
-
- {
- MockDBClientConnection conn(&server);
- BSONObj response;
- ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << 1), response));
- ASSERT_EQUALS(1, response["ok"].numberInt());
- ASSERT_EQUALS("a", response["set"].str());
- ASSERT(response["isMaster"].trueValue());
-
- ASSERT_EQUALS(1U, server.getCmdCount());
- }
-
- {
- MockDBClientConnection conn(&server);
- BSONObj response;
- ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << 1), response));
- ASSERT_EQUALS(1, response["ok"].numberInt());
- ASSERT_EQUALS("a", response["set"].str());
- ASSERT(!response["isMaster"].trueValue());
-
- ASSERT_EQUALS(2U, server.getCmdCount());
- }
-
- {
- MockDBClientConnection conn(&server);
- BSONObj response;
- ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << 1), response));
- ASSERT_EQUALS(1, response["ok"].numberInt());
- ASSERT_EQUALS("a", response["set"].str());
- ASSERT(response["isMaster"].trueValue());
-
- ASSERT_EQUALS(3U, server.getCmdCount());
- }
- }
-
- TEST(MockDBClientConnTest, CmdWithMultiFields) {
- MockRemoteDBServer server("test");
- server.setCommandReply("getLastError", BSON("ok" << 1 << "n" << 10));
+TEST(MockDBClientConnTest, SetCmdReply) {
+ MockRemoteDBServer server("test");
+ server.setCommandReply("serverStatus",
+ BSON("ok" << 1 << "host"
+ << "local"));
+ {
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(conn.runCommand("foo.baz", BSON("getLastError" << 1 << "w" << 2
- << "journal" << true), response));
+ ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response));
+ ASSERT_EQUALS(1, response["ok"].numberInt());
+ ASSERT_EQUALS("local", response["host"].str());
- ASSERT_EQUALS(10, response["n"].numberInt());
+ ASSERT_EQUALS(1U, server.getCmdCount());
}
- TEST(MockDBClientConnTest, BadCmd) {
- MockRemoteDBServer server("test");
- server.setCommandReply("getLastError", BSON("ok" << 0));
+ // Make sure that repeated calls will still give you the same result
+ {
+ MockDBClientConnection conn(&server);
+ BSONObj response;
+ ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response));
+ ASSERT_EQUALS(1, response["ok"].numberInt());
+ ASSERT_EQUALS("local", response["host"].str());
+
+ ASSERT_EQUALS(2U, server.getCmdCount());
+ }
+ {
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(!conn.runCommand("foo.baz", BSON("getLastError" << 1), response));
+ ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response));
+ ASSERT_EQUALS(1, response["ok"].numberInt());
+ ASSERT_EQUALS("local", response["host"].str());
+
+ ASSERT_EQUALS(3U, server.getCmdCount());
}
+}
- TEST(MockDBClientConnTest, MultipleStoredResponse) {
- MockRemoteDBServer server("test");
- server.setCommandReply("getLastError", BSON("ok" << 1 << "n" << 10));
- server.setCommandReply("isMaster", BSON("ok" << 1 << "secondary" << false));
+TEST(MockDBClientConnTest, CyclingCmd) {
+ MockRemoteDBServer server("test");
+
+ {
+ vector<BSONObj> isMasterSequence;
+ isMasterSequence.push_back(BSON("set"
+ << "a"
+ << "isMaster" << true << "ok" << 1));
+ isMasterSequence.push_back(BSON("set"
+ << "a"
+ << "isMaster" << false << "ok" << 1));
+ server.setCommandReply("isMaster", isMasterSequence);
+ }
+ {
MockDBClientConnection conn(&server);
- {
- BSONObj response;
- ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << "abc"), response));
- ASSERT(!response["secondary"].trueValue());
- }
+ BSONObj response;
+ ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << 1), response));
+ ASSERT_EQUALS(1, response["ok"].numberInt());
+ ASSERT_EQUALS("a", response["set"].str());
+ ASSERT(response["isMaster"].trueValue());
- {
- BSONObj response;
- ASSERT(conn.runCommand("a.b", BSON("getLastError" << 1), response));
- ASSERT_EQUALS(10, response["n"].numberInt());
- }
+ ASSERT_EQUALS(1U, server.getCmdCount());
}
- TEST(MockDBClientConnTest, CmdCount) {
- MockRemoteDBServer server("test");
- ASSERT_EQUALS(0U, server.getCmdCount());
+ {
+ MockDBClientConnection conn(&server);
+ BSONObj response;
+ ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << 1), response));
+ ASSERT_EQUALS(1, response["ok"].numberInt());
+ ASSERT_EQUALS("a", response["set"].str());
+ ASSERT(!response["isMaster"].trueValue());
- server.setCommandReply("serverStatus", BSON("ok" << 1));
+ ASSERT_EQUALS(2U, server.getCmdCount());
+ }
- {
- MockDBClientConnection conn(&server);
- BSONObj response;
- ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response));
- ASSERT_EQUALS(1U, server.getCmdCount());
- }
+ {
+ MockDBClientConnection conn(&server);
+ BSONObj response;
+ ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << 1), response));
+ ASSERT_EQUALS(1, response["ok"].numberInt());
+ ASSERT_EQUALS("a", response["set"].str());
+ ASSERT(response["isMaster"].trueValue());
- {
- MockDBClientConnection conn(&server);
- BSONObj response;
- ASSERT(conn.runCommand("baz.bar", BSON("serverStatus" << 1), response));
- ASSERT_EQUALS(2U, server.getCmdCount());
- }
+ ASSERT_EQUALS(3U, server.getCmdCount());
}
+}
- TEST(MockDBClientConnTest, Shutdown) {
- MockRemoteDBServer server("test");
- server.setCommandReply("serverStatus", BSON("ok" << 1));
- ASSERT(server.isRunning());
+TEST(MockDBClientConnTest, CmdWithMultiFields) {
+ MockRemoteDBServer server("test");
+ server.setCommandReply("getLastError", BSON("ok" << 1 << "n" << 10));
- {
- MockDBClientConnection conn(&server);
+ MockDBClientConnection conn(&server);
+ BSONObj response;
+ ASSERT(conn.runCommand(
+ "foo.baz", BSON("getLastError" << 1 << "w" << 2 << "journal" << true), response));
- server.shutdown();
- ASSERT(!server.isRunning());
+ ASSERT_EQUALS(10, response["n"].numberInt());
+}
+
+TEST(MockDBClientConnTest, BadCmd) {
+ MockRemoteDBServer server("test");
+ server.setCommandReply("getLastError", BSON("ok" << 0));
- ASSERT_THROWS(conn.query("test.user"), mongo::SocketException);
- }
+ MockDBClientConnection conn(&server);
+ BSONObj response;
+ ASSERT(!conn.runCommand("foo.baz", BSON("getLastError" << 1), response));
+}
- {
- MockDBClientConnection conn(&server);
- BSONObj response;
- ASSERT_THROWS(conn.runCommand("test.user",
- BSON("serverStatus" << 1), response), mongo::SocketException);
- }
+TEST(MockDBClientConnTest, MultipleStoredResponse) {
+ MockRemoteDBServer server("test");
+ server.setCommandReply("getLastError", BSON("ok" << 1 << "n" << 10));
+ server.setCommandReply("isMaster", BSON("ok" << 1 << "secondary" << false));
- ASSERT_EQUALS(0U, server.getQueryCount());
- ASSERT_EQUALS(0U, server.getCmdCount());
+ MockDBClientConnection conn(&server);
+ {
+ BSONObj response;
+ ASSERT(conn.runCommand("foo.baz",
+ BSON("isMaster"
+ << "abc"),
+ response));
+ ASSERT(!response["secondary"].trueValue());
+ }
+
+ {
+ BSONObj response;
+ ASSERT(conn.runCommand("a.b", BSON("getLastError" << 1), response));
+ ASSERT_EQUALS(10, response["n"].numberInt());
}
+}
- TEST(MockDBClientConnTest, Restart) {
- MockRemoteDBServer server("test");
- server.setCommandReply("serverStatus", BSON("ok" << 1));
+TEST(MockDBClientConnTest, CmdCount) {
+ MockRemoteDBServer server("test");
+ ASSERT_EQUALS(0U, server.getCmdCount());
- MockDBClientConnection conn1(&server);
+ server.setCommandReply("serverStatus", BSON("ok" << 1));
- // Do some queries and commands then check the counters later that
- // new instance still has it
- conn1.query("test.user");
+ {
+ MockDBClientConnection conn(&server);
BSONObj response;
- conn1.runCommand("test.user", BSON("serverStatus" << 1), response);
+ ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response));
+ ASSERT_EQUALS(1U, server.getCmdCount());
+ }
+
+ {
+ MockDBClientConnection conn(&server);
+ BSONObj response;
+ ASSERT(conn.runCommand("baz.bar", BSON("serverStatus" << 1), response));
+ ASSERT_EQUALS(2U, server.getCmdCount());
+ }
+}
+
+TEST(MockDBClientConnTest, Shutdown) {
+ MockRemoteDBServer server("test");
+ server.setCommandReply("serverStatus", BSON("ok" << 1));
+ ASSERT(server.isRunning());
+
+ {
+ MockDBClientConnection conn(&server);
server.shutdown();
- ASSERT_THROWS(conn1.query("test.user"), mongo::SocketException);
+ ASSERT(!server.isRunning());
- // New connections shouldn't work either
- MockDBClientConnection conn2(&server);
- ASSERT_THROWS(conn2.query("test.user"), mongo::SocketException);
+ ASSERT_THROWS(conn.query("test.user"), mongo::SocketException);
+ }
- ASSERT_EQUALS(1U, server.getQueryCount());
- ASSERT_EQUALS(1U, server.getCmdCount());
+ {
+ MockDBClientConnection conn(&server);
+ BSONObj response;
+ ASSERT_THROWS(conn.runCommand("test.user", BSON("serverStatus" << 1), response),
+ mongo::SocketException);
+ }
- server.reboot();
- ASSERT(server.isRunning());
+ ASSERT_EQUALS(0U, server.getQueryCount());
+ ASSERT_EQUALS(0U, server.getCmdCount());
+}
- {
- MockDBClientConnection conn(&server);
- conn.query("test.user");
- }
+TEST(MockDBClientConnTest, Restart) {
+ MockRemoteDBServer server("test");
+ server.setCommandReply("serverStatus", BSON("ok" << 1));
- // Old connections still shouldn't work
- ASSERT_THROWS(conn1.query("test.user"), mongo::SocketException);
- ASSERT_THROWS(conn2.query("test.user"), mongo::SocketException);
+ MockDBClientConnection conn1(&server);
- ASSERT_EQUALS(2U, server.getQueryCount());
- ASSERT_EQUALS(1U, server.getCmdCount());
- }
+    // Do some queries and commands, then verify later that the counters
+    // survive into the new instance after a reboot.
+ conn1.query("test.user");
+ BSONObj response;
+ conn1.runCommand("test.user", BSON("serverStatus" << 1), response);
+
+ server.shutdown();
+ ASSERT_THROWS(conn1.query("test.user"), mongo::SocketException);
+
+ // New connections shouldn't work either
+ MockDBClientConnection conn2(&server);
+ ASSERT_THROWS(conn2.query("test.user"), mongo::SocketException);
- TEST(MockDBClientConnTest, ClearCounter) {
- MockRemoteDBServer server("test");
- server.setCommandReply("serverStatus", BSON("ok" << 1));
+ ASSERT_EQUALS(1U, server.getQueryCount());
+ ASSERT_EQUALS(1U, server.getCmdCount());
+ server.reboot();
+ ASSERT(server.isRunning());
+
+ {
MockDBClientConnection conn(&server);
conn.query("test.user");
- BSONObj response;
- conn.runCommand("test.user", BSON("serverStatus" << 1), response);
-
- server.clearCounters();
- ASSERT_EQUALS(0U, server.getQueryCount());
- ASSERT_EQUALS(0U, server.getCmdCount());
}
- TEST(MockDBClientConnTest, Delay) {
- MockRemoteDBServer server("test");
- server.setCommandReply("serverStatus", BSON("ok" << 1));
- server.setDelay(150);
+ // Old connections still shouldn't work
+ ASSERT_THROWS(conn1.query("test.user"), mongo::SocketException);
+ ASSERT_THROWS(conn2.query("test.user"), mongo::SocketException);
- MockDBClientConnection conn(&server);
+ ASSERT_EQUALS(2U, server.getQueryCount());
+ ASSERT_EQUALS(1U, server.getCmdCount());
+}
- {
- mongo::Timer timer;
- conn.query("x.x");
- const int nowInMilliSec = timer.millis();
- // Use a more lenient lower bound since some platforms like Windows
- // don't guarantee that sleeps will not wake up earlier (unlike
- // nanosleep we use for Linux)
- ASSERT_GREATER_THAN_OR_EQUALS(nowInMilliSec, 130);
- }
+TEST(MockDBClientConnTest, ClearCounter) {
+ MockRemoteDBServer server("test");
+ server.setCommandReply("serverStatus", BSON("ok" << 1));
- {
- mongo::Timer timer;
- BSONObj response;
- conn.runCommand("x.x", BSON("serverStatus" << 1), response);
- const int nowInMilliSec = timer.millis();
- ASSERT_GREATER_THAN_OR_EQUALS(nowInMilliSec, 130);
- }
+ MockDBClientConnection conn(&server);
+ conn.query("test.user");
+ BSONObj response;
+ conn.runCommand("test.user", BSON("serverStatus" << 1), response);
- ASSERT_EQUALS(1U, server.getQueryCount());
- ASSERT_EQUALS(1U, server.getCmdCount());
+ server.clearCounters();
+ ASSERT_EQUALS(0U, server.getQueryCount());
+ ASSERT_EQUALS(0U, server.getCmdCount());
+}
+
+TEST(MockDBClientConnTest, Delay) {
+ MockRemoteDBServer server("test");
+ server.setCommandReply("serverStatus", BSON("ok" << 1));
+ server.setDelay(150);
+
+ MockDBClientConnection conn(&server);
+
+ {
+ mongo::Timer timer;
+ conn.query("x.x");
+ const int nowInMilliSec = timer.millis();
+ // Use a more lenient lower bound since some platforms like Windows
+        // don't guarantee that sleeps will not wake up early (unlike the
+        // nanosleep we use on Linux).
+ ASSERT_GREATER_THAN_OR_EQUALS(nowInMilliSec, 130);
+ }
+
+ {
+ mongo::Timer timer;
+ BSONObj response;
+ conn.runCommand("x.x", BSON("serverStatus" << 1), response);
+ const int nowInMilliSec = timer.millis();
+ ASSERT_GREATER_THAN_OR_EQUALS(nowInMilliSec, 130);
}
+
+ ASSERT_EQUALS(1U, server.getQueryCount());
+ ASSERT_EQUALS(1U, server.getCmdCount());
+}
}
diff --git a/src/mongo/dbtests/mock_replica_set_test.cpp b/src/mongo/dbtests/mock_replica_set_test.cpp
index 6cda455fd3d..3b0ac16074c 100644
--- a/src/mongo/dbtests/mock_replica_set_test.cpp
+++ b/src/mongo/dbtests/mock_replica_set_test.cpp
@@ -48,100 +48,269 @@ using std::string;
using std::vector;
namespace mongo_test {
- TEST(MockReplicaSetTest, SetName) {
- MockReplicaSet replSet("n", 3);
- ASSERT_EQUALS("n", replSet.getSetName());
- }
+TEST(MockReplicaSetTest, SetName) {
+ MockReplicaSet replSet("n", 3);
+ ASSERT_EQUALS("n", replSet.getSetName());
+}
+
+TEST(MockReplicaSetTest, ConnectionString) {
+ MockReplicaSet replSet("n", 3);
+ ASSERT_EQUALS("n/$n0:27017,$n1:27017,$n2:27017", replSet.getConnectionString());
+}
+
+TEST(MockReplicaSetTest, GetNode) {
+ MockReplicaSet replSet("n", 3);
+ ASSERT_EQUALS("$n0:27017", replSet.getNode("$n0:27017")->getServerAddress());
+ ASSERT_EQUALS("$n1:27017", replSet.getNode("$n1:27017")->getServerAddress());
+ ASSERT_EQUALS("$n2:27017", replSet.getNode("$n2:27017")->getServerAddress());
+ ASSERT(replSet.getNode("$n3:27017") == NULL);
+}
- TEST(MockReplicaSetTest, ConnectionString) {
- MockReplicaSet replSet("n", 3);
- ASSERT_EQUALS("n/$n0:27017,$n1:27017,$n2:27017",
- replSet.getConnectionString());
+TEST(MockReplicaSetTest, IsMasterNode0) {
+ MockReplicaSet replSet("n", 3);
+ set<string> expectedHosts;
+ expectedHosts.insert("$n0:27017");
+ expectedHosts.insert("$n1:27017");
+ expectedHosts.insert("$n2:27017");
+
+ BSONObj cmdResponse;
+ MockRemoteDBServer* node = replSet.getNode("$n0:27017");
+ const MockRemoteDBServer::InstanceID id = node->getInstanceID();
+ bool ok = node->runCommand(id, "foo.bar", BSON("ismaster" << 1), cmdResponse);
+ ASSERT(ok);
+
+ ASSERT(cmdResponse["ismaster"].trueValue());
+ ASSERT(!cmdResponse["secondary"].trueValue());
+ ASSERT_EQUALS("$n0:27017", cmdResponse["me"].str());
+ ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str());
+ ASSERT_EQUALS("n", cmdResponse["setName"].str());
+
+ set<string> hostList;
+ BSONObjIterator iter(cmdResponse["hosts"].embeddedObject());
+ while (iter.more()) {
+ hostList.insert(iter.next().str());
}
- TEST(MockReplicaSetTest, GetNode) {
- MockReplicaSet replSet("n", 3);
- ASSERT_EQUALS("$n0:27017", replSet.getNode("$n0:27017")->getServerAddress());
- ASSERT_EQUALS("$n1:27017", replSet.getNode("$n1:27017")->getServerAddress());
- ASSERT_EQUALS("$n2:27017", replSet.getNode("$n2:27017")->getServerAddress());
- ASSERT(replSet.getNode("$n3:27017") == NULL);
+ ASSERT(expectedHosts == hostList);
+}
+
+TEST(MockReplicaSetTest, IsMasterNode1) {
+ MockReplicaSet replSet("n", 3);
+ set<string> expectedHosts;
+ expectedHosts.insert("$n0:27017");
+ expectedHosts.insert("$n1:27017");
+ expectedHosts.insert("$n2:27017");
+
+ BSONObj cmdResponse;
+ MockRemoteDBServer* node = replSet.getNode("$n1:27017");
+ const MockRemoteDBServer::InstanceID id = node->getInstanceID();
+ bool ok = node->runCommand(id, "foo.bar", BSON("ismaster" << 1), cmdResponse);
+ ASSERT(ok);
+
+ ASSERT(!cmdResponse["ismaster"].trueValue());
+ ASSERT(cmdResponse["secondary"].trueValue());
+ ASSERT_EQUALS("$n1:27017", cmdResponse["me"].str());
+ ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str());
+ ASSERT_EQUALS("n", cmdResponse["setName"].str());
+
+ set<string> hostList;
+ BSONObjIterator iter(cmdResponse["hosts"].embeddedObject());
+ while (iter.more()) {
+ hostList.insert(iter.next().str());
}
- TEST(MockReplicaSetTest, IsMasterNode0) {
- MockReplicaSet replSet("n", 3);
- set<string> expectedHosts;
- expectedHosts.insert("$n0:27017");
- expectedHosts.insert("$n1:27017");
- expectedHosts.insert("$n2:27017");
+ ASSERT(expectedHosts == hostList);
+}
- BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode("$n0:27017");
- const MockRemoteDBServer::InstanceID id = node->getInstanceID();
- bool ok = node->runCommand(id, "foo.bar", BSON("ismaster" << 1), cmdResponse);
- ASSERT(ok);
+TEST(MockReplicaSetTest, IsMasterNode2) {
+ MockReplicaSet replSet("n", 3);
+ set<string> expectedHosts;
+ expectedHosts.insert("$n0:27017");
+ expectedHosts.insert("$n1:27017");
+ expectedHosts.insert("$n2:27017");
+
+ BSONObj cmdResponse;
+ MockRemoteDBServer* node = replSet.getNode("$n2:27017");
+ const MockRemoteDBServer::InstanceID id = node->getInstanceID();
+ bool ok = node->runCommand(id, "foo.bar", BSON("ismaster" << 1), cmdResponse);
+ ASSERT(ok);
+
+ ASSERT(!cmdResponse["ismaster"].trueValue());
+ ASSERT(cmdResponse["secondary"].trueValue());
+ ASSERT_EQUALS("$n2:27017", cmdResponse["me"].str());
+ ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str());
+ ASSERT_EQUALS("n", cmdResponse["setName"].str());
+
+ set<string> hostList;
+ BSONObjIterator iter(cmdResponse["hosts"].embeddedObject());
+ while (iter.more()) {
+ hostList.insert(iter.next().str());
+ }
- ASSERT(cmdResponse["ismaster"].trueValue());
- ASSERT(!cmdResponse["secondary"].trueValue());
- ASSERT_EQUALS("$n0:27017", cmdResponse["me"].str());
- ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str());
- ASSERT_EQUALS("n", cmdResponse["setName"].str());
+ ASSERT(expectedHosts == hostList);
+}
- set<string> hostList;
- BSONObjIterator iter(cmdResponse["hosts"].embeddedObject());
- while (iter.more()) {
- hostList.insert(iter.next().str());
+TEST(MockReplicaSetTest, ReplSetGetStatusNode0) {
+ MockReplicaSet replSet("n", 3);
+ set<string> expectedMembers;
+ expectedMembers.insert("$n0:27017");
+ expectedMembers.insert("$n1:27017");
+ expectedMembers.insert("$n2:27017");
+
+ BSONObj cmdResponse;
+ MockRemoteDBServer* node = replSet.getNode("$n0:27017");
+ const MockRemoteDBServer::InstanceID id = node->getInstanceID();
+ bool ok = node->runCommand(id, "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse);
+ ASSERT(ok);
+
+ ASSERT_EQUALS("n", cmdResponse["set"].str());
+ ASSERT_EQUALS(1, cmdResponse["myState"].numberInt());
+
+ set<string> memberList;
+ BSONObjIterator iter(cmdResponse["members"].embeddedObject());
+ while (iter.more()) {
+ BSONElement member(iter.next());
+ memberList.insert(member["name"].str());
+
+ if (member["self"].trueValue()) {
+ ASSERT_EQUALS(1, member["state"].numberInt());
+ ASSERT_EQUALS("$n0:27017", member["name"].str());
+ } else {
+ ASSERT_EQUALS(2, member["state"].numberInt());
}
+ }
- ASSERT(expectedHosts == hostList);
+ ASSERT(expectedMembers == memberList);
+}
+
+TEST(MockReplicaSetTest, ReplSetGetStatusNode1) {
+ MockReplicaSet replSet("n", 3);
+ set<string> expectedMembers;
+ expectedMembers.insert("$n0:27017");
+ expectedMembers.insert("$n1:27017");
+ expectedMembers.insert("$n2:27017");
+
+ BSONObj cmdResponse;
+ MockRemoteDBServer* node = replSet.getNode("$n1:27017");
+ const MockRemoteDBServer::InstanceID id = node->getInstanceID();
+ bool ok = node->runCommand(id, "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse);
+ ASSERT(ok);
+
+ ASSERT_EQUALS("n", cmdResponse["set"].str());
+ ASSERT_EQUALS(2, cmdResponse["myState"].numberInt());
+
+ set<string> memberList;
+ BSONObjIterator iter(cmdResponse["members"].embeddedObject());
+ while (iter.more()) {
+ BSONElement member(iter.next());
+ memberList.insert(member["name"].str());
+
+ if (member["self"].trueValue()) {
+ ASSERT_EQUALS(2, member["state"].numberInt());
+ ASSERT_EQUALS("$n1:27017", member["name"].str());
+ } else if (member["name"].str() == "$n0:27017") {
+ ASSERT_EQUALS(1, member["state"].numberInt());
+ } else {
+ ASSERT_EQUALS(2, member["state"].numberInt());
+ }
}
- TEST(MockReplicaSetTest, IsMasterNode1) {
- MockReplicaSet replSet("n", 3);
- set<string> expectedHosts;
- expectedHosts.insert("$n0:27017");
- expectedHosts.insert("$n1:27017");
- expectedHosts.insert("$n2:27017");
+ ASSERT(expectedMembers == memberList);
+}
- BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode("$n1:27017");
- const MockRemoteDBServer::InstanceID id = node->getInstanceID();
- bool ok = node->runCommand(id, "foo.bar", BSON("ismaster" << 1), cmdResponse);
- ASSERT(ok);
+TEST(MockReplicaSetTest, ReplSetGetStatusNode2) {
+ MockReplicaSet replSet("n", 3);
+ set<string> expectedMembers;
+ expectedMembers.insert("$n0:27017");
+ expectedMembers.insert("$n1:27017");
+ expectedMembers.insert("$n2:27017");
+
+ BSONObj cmdResponse;
+ MockRemoteDBServer* node = replSet.getNode("$n2:27017");
+ const MockRemoteDBServer::InstanceID id = node->getInstanceID();
+ bool ok = node->runCommand(id, "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse);
+ ASSERT(ok);
+
+ ASSERT_EQUALS("n", cmdResponse["set"].str());
+ ASSERT_EQUALS(2, cmdResponse["myState"].numberInt());
+
+ set<string> memberList;
+ BSONObjIterator iter(cmdResponse["members"].embeddedObject());
+ while (iter.more()) {
+ BSONElement member(iter.next());
+ memberList.insert(member["name"].str());
+
+ if (member["self"].trueValue()) {
+ ASSERT_EQUALS(2, member["state"].numberInt());
+ ASSERT_EQUALS("$n2:27017", member["name"].str());
+ } else if (member["name"].str() == "$n0:27017") {
+ ASSERT_EQUALS(1, member["state"].numberInt());
+ } else {
+ ASSERT_EQUALS(2, member["state"].numberInt());
+ }
+ }
- ASSERT(!cmdResponse["ismaster"].trueValue());
- ASSERT(cmdResponse["secondary"].trueValue());
- ASSERT_EQUALS("$n1:27017", cmdResponse["me"].str());
- ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str());
- ASSERT_EQUALS("n", cmdResponse["setName"].str());
+ ASSERT(expectedMembers == memberList);
+}
- set<string> hostList;
- BSONObjIterator iter(cmdResponse["hosts"].embeddedObject());
- while (iter.more()) {
- hostList.insert(iter.next().str());
+namespace {
+/**
+ * Takes a ReplicaSetConfig and a node to remove and returns a new config with equivalent
+ * members minus the one specified to be removed. NOTE: Does not copy over properties of the
+ * members other than their id and host.
+ */
+ReplicaSetConfig _getConfigWithMemberRemoved(const ReplicaSetConfig& oldConfig,
+ const HostAndPort& toRemove) {
+ BSONObjBuilder newConfigBuilder;
+ newConfigBuilder.append("_id", oldConfig.getReplSetName());
+ newConfigBuilder.append("version", oldConfig.getConfigVersion());
+
+ BSONArrayBuilder membersBuilder(newConfigBuilder.subarrayStart("members"));
+ for (ReplicaSetConfig::MemberIterator member = oldConfig.membersBegin();
+ member != oldConfig.membersEnd();
+ ++member) {
+ if (member->getHostAndPort() == toRemove) {
+ continue;
}
- ASSERT(expectedHosts == hostList);
+ membersBuilder.append(
+ BSON("_id" << member->getId() << "host" << member->getHostAndPort().toString()));
}
- TEST(MockReplicaSetTest, IsMasterNode2) {
- MockReplicaSet replSet("n", 3);
- set<string> expectedHosts;
- expectedHosts.insert("$n0:27017");
- expectedHosts.insert("$n1:27017");
- expectedHosts.insert("$n2:27017");
+ membersBuilder.done();
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(newConfigBuilder.obj()));
+ ASSERT_OK(newConfig.validate());
+ return newConfig;
+}
+} // namespace
+
+TEST(MockReplicaSetTest, IsMasterReconfigNodeRemoved) {
+ MockReplicaSet replSet("n", 3);
+
+ ReplicaSetConfig oldConfig = replSet.getReplConfig();
+ const string hostToRemove("$n1:27017");
+ ReplicaSetConfig newConfig = _getConfigWithMemberRemoved(oldConfig, HostAndPort(hostToRemove));
+ replSet.setConfig(newConfig);
+ {
+ // Check isMaster for node still in set
BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode("$n2:27017");
+ MockRemoteDBServer* node = replSet.getNode("$n0:27017");
const MockRemoteDBServer::InstanceID id = node->getInstanceID();
bool ok = node->runCommand(id, "foo.bar", BSON("ismaster" << 1), cmdResponse);
ASSERT(ok);
- ASSERT(!cmdResponse["ismaster"].trueValue());
- ASSERT(cmdResponse["secondary"].trueValue());
- ASSERT_EQUALS("$n2:27017", cmdResponse["me"].str());
+ ASSERT(cmdResponse["ismaster"].trueValue());
+ ASSERT(!cmdResponse["secondary"].trueValue());
+ ASSERT_EQUALS("$n0:27017", cmdResponse["me"].str());
ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str());
ASSERT_EQUALS("n", cmdResponse["setName"].str());
+ set<string> expectedHosts;
+ expectedHosts.insert("$n0:27017");
+ expectedHosts.insert("$n2:27017");
+
set<string> hostList;
BSONObjIterator iter(cmdResponse["hosts"].embeddedObject());
while (iter.more()) {
@@ -149,51 +318,36 @@ namespace mongo_test {
}
ASSERT(expectedHosts == hostList);
+ ASSERT(hostList.count(hostToRemove) == 0);
}
- TEST(MockReplicaSetTest, ReplSetGetStatusNode0) {
- MockReplicaSet replSet("n", 3);
- set<string> expectedMembers;
- expectedMembers.insert("$n0:27017");
- expectedMembers.insert("$n1:27017");
- expectedMembers.insert("$n2:27017");
-
+ {
+ // Check isMaster for the node that is no longer in the set
BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode("$n0:27017");
+ MockRemoteDBServer* node = replSet.getNode(hostToRemove);
const MockRemoteDBServer::InstanceID id = node->getInstanceID();
- bool ok = node->runCommand(id, "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse);
+ bool ok = node->runCommand(id, "foo.bar", BSON("ismaster" << 1), cmdResponse);
ASSERT(ok);
- ASSERT_EQUALS("n", cmdResponse["set"].str());
- ASSERT_EQUALS(1, cmdResponse["myState"].numberInt());
-
- set<string> memberList;
- BSONObjIterator iter(cmdResponse["members"].embeddedObject());
- while (iter.more()) {
- BSONElement member(iter.next());
- memberList.insert(member["name"].str());
-
- if (member["self"].trueValue()) {
- ASSERT_EQUALS(1, member["state"].numberInt());
- ASSERT_EQUALS("$n0:27017", member["name"].str());
- }
- else {
- ASSERT_EQUALS(2, member["state"].numberInt());
- }
- }
-
- ASSERT(expectedMembers == memberList);
+ ASSERT(!cmdResponse["ismaster"].trueValue());
+ ASSERT(!cmdResponse["secondary"].trueValue());
+ ASSERT_EQUALS(hostToRemove, cmdResponse["me"].str());
+ ASSERT_EQUALS("n", cmdResponse["setName"].str());
}
+}
- TEST(MockReplicaSetTest, ReplSetGetStatusNode1) {
- MockReplicaSet replSet("n", 3);
- set<string> expectedMembers;
- expectedMembers.insert("$n0:27017");
- expectedMembers.insert("$n1:27017");
- expectedMembers.insert("$n2:27017");
+TEST(MockReplicaSetTest, replSetGetStatusReconfigNodeRemoved) {
+ MockReplicaSet replSet("n", 3);
+
+ ReplicaSetConfig oldConfig = replSet.getReplConfig();
+ const string hostToRemove("$n1:27017");
+ ReplicaSetConfig newConfig = _getConfigWithMemberRemoved(oldConfig, HostAndPort(hostToRemove));
+ replSet.setConfig(newConfig);
+ {
+ // Check replSetGetStatus for node still in set
BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode("$n1:27017");
+ MockRemoteDBServer* node = replSet.getNode("$n2:27017");
const MockRemoteDBServer::InstanceID id = node->getInstanceID();
bool ok = node->runCommand(id, "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse);
ASSERT(ok);
@@ -209,223 +363,59 @@ namespace mongo_test {
if (member["self"].trueValue()) {
ASSERT_EQUALS(2, member["state"].numberInt());
- ASSERT_EQUALS("$n1:27017", member["name"].str());
- }
- else if (member["name"].str() == "$n0:27017") {
+ ASSERT_EQUALS("$n2:27017", member["name"].str());
+ } else if (member["name"].str() == "$n0:27017") {
ASSERT_EQUALS(1, member["state"].numberInt());
- }
- else {
+ } else {
ASSERT_EQUALS(2, member["state"].numberInt());
}
}
- ASSERT(expectedMembers == memberList);
- }
-
- TEST(MockReplicaSetTest, ReplSetGetStatusNode2) {
- MockReplicaSet replSet("n", 3);
set<string> expectedMembers;
expectedMembers.insert("$n0:27017");
- expectedMembers.insert("$n1:27017");
expectedMembers.insert("$n2:27017");
+ ASSERT(expectedMembers == memberList);
+ }
+ {
+ // Check replSetGetStatus for the node that is no longer in the set
BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode("$n2:27017");
+ MockRemoteDBServer* node = replSet.getNode(hostToRemove);
const MockRemoteDBServer::InstanceID id = node->getInstanceID();
bool ok = node->runCommand(id, "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse);
ASSERT(ok);
ASSERT_EQUALS("n", cmdResponse["set"].str());
- ASSERT_EQUALS(2, cmdResponse["myState"].numberInt());
-
- set<string> memberList;
- BSONObjIterator iter(cmdResponse["members"].embeddedObject());
- while (iter.more()) {
- BSONElement member(iter.next());
- memberList.insert(member["name"].str());
-
- if (member["self"].trueValue()) {
- ASSERT_EQUALS(2, member["state"].numberInt());
- ASSERT_EQUALS("$n2:27017", member["name"].str());
- }
- else if (member["name"].str() == "$n0:27017") {
- ASSERT_EQUALS(1, member["state"].numberInt());
- }
- else {
- ASSERT_EQUALS(2, member["state"].numberInt());
- }
- }
-
- ASSERT(expectedMembers == memberList);
- }
-
-namespace {
- /**
- * Takes a ReplicaSetConfig and a node to remove and returns a new config with equivalent
- * members minus the one specified to be removed. NOTE: Does not copy over properties of the
- * members other than their id and host.
- */
- ReplicaSetConfig _getConfigWithMemberRemoved(
- const ReplicaSetConfig& oldConfig, const HostAndPort& toRemove) {
- BSONObjBuilder newConfigBuilder;
- newConfigBuilder.append("_id", oldConfig.getReplSetName());
- newConfigBuilder.append("version", oldConfig.getConfigVersion());
-
- BSONArrayBuilder membersBuilder(newConfigBuilder.subarrayStart("members"));
- for (ReplicaSetConfig::MemberIterator member = oldConfig.membersBegin();
- member != oldConfig.membersEnd(); ++member) {
- if (member->getHostAndPort() == toRemove) {
- continue;
- }
-
- membersBuilder.append(BSON("_id" << member->getId() <<
- "host" << member->getHostAndPort().toString()));
- }
-
- membersBuilder.done();
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(newConfigBuilder.obj()));
- ASSERT_OK(newConfig.validate());
- return newConfig;
- }
-} // namespace
-
- TEST(MockReplicaSetTest, IsMasterReconfigNodeRemoved) {
- MockReplicaSet replSet("n", 3);
-
- ReplicaSetConfig oldConfig = replSet.getReplConfig();
- const string hostToRemove("$n1:27017");
- ReplicaSetConfig newConfig = _getConfigWithMemberRemoved(oldConfig,
- HostAndPort(hostToRemove));
- replSet.setConfig(newConfig);
-
- {
- // Check isMaster for node still in set
- BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode("$n0:27017");
- const MockRemoteDBServer::InstanceID id = node->getInstanceID();
- bool ok = node->runCommand(id, "foo.bar", BSON("ismaster" << 1), cmdResponse);
- ASSERT(ok);
-
- ASSERT(cmdResponse["ismaster"].trueValue());
- ASSERT(!cmdResponse["secondary"].trueValue());
- ASSERT_EQUALS("$n0:27017", cmdResponse["me"].str());
- ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str());
- ASSERT_EQUALS("n", cmdResponse["setName"].str());
-
- set<string> expectedHosts;
- expectedHosts.insert("$n0:27017");
- expectedHosts.insert("$n2:27017");
-
- set<string> hostList;
- BSONObjIterator iter(cmdResponse["hosts"].embeddedObject());
- while (iter.more()) {
- hostList.insert(iter.next().str());
- }
-
- ASSERT(expectedHosts == hostList);
- ASSERT(hostList.count(hostToRemove) == 0);
- }
-
- {
- // Check isMaster for node still not in set anymore
- BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode(hostToRemove);
- const MockRemoteDBServer::InstanceID id = node->getInstanceID();
- bool ok = node->runCommand(id, "foo.bar", BSON("ismaster" << 1), cmdResponse);
- ASSERT(ok);
-
- ASSERT(!cmdResponse["ismaster"].trueValue());
- ASSERT(!cmdResponse["secondary"].trueValue());
- ASSERT_EQUALS(hostToRemove, cmdResponse["me"].str());
- ASSERT_EQUALS("n", cmdResponse["setName"].str());
- }
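+ // State 10 is REMOVED: this node is no longer part of the replica set config.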
+ ASSERT_EQUALS(10, cmdResponse["myState"].numberInt());
}
+}
- TEST(MockReplicaSetTest, replSetGetStatusReconfigNodeRemoved) {
- MockReplicaSet replSet("n", 3);
-
- ReplicaSetConfig oldConfig = replSet.getReplConfig();
- const string hostToRemove("$n1:27017");
- ReplicaSetConfig newConfig = _getConfigWithMemberRemoved(oldConfig,
- HostAndPort(hostToRemove));
- replSet.setConfig(newConfig);
-
- {
- // Check replSetGetStatus for node still in set
- BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode("$n2:27017");
- const MockRemoteDBServer::InstanceID id = node->getInstanceID();
- bool ok = node->runCommand(id, "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse);
- ASSERT(ok);
-
- ASSERT_EQUALS("n", cmdResponse["set"].str());
- ASSERT_EQUALS(2, cmdResponse["myState"].numberInt());
-
- set<string> memberList;
- BSONObjIterator iter(cmdResponse["members"].embeddedObject());
- while (iter.more()) {
- BSONElement member(iter.next());
- memberList.insert(member["name"].str());
-
- if (member["self"].trueValue()) {
- ASSERT_EQUALS(2, member["state"].numberInt());
- ASSERT_EQUALS("$n2:27017", member["name"].str());
- }
- else if (member["name"].str() == "$n0:27017") {
- ASSERT_EQUALS(1, member["state"].numberInt());
- }
- else {
- ASSERT_EQUALS(2, member["state"].numberInt());
- }
- }
+TEST(MockReplicaSetTest, KillNode) {
+ MockReplicaSet replSet("n", 3);
+ const string priHostName(replSet.getPrimary());
+ replSet.kill(priHostName);
- set<string> expectedMembers;
- expectedMembers.insert("$n0:27017");
- expectedMembers.insert("$n2:27017");
- ASSERT(expectedMembers == memberList);
- }
+ ASSERT(!replSet.getNode(priHostName)->isRunning());
- {
- // Check replSetGetStatus for node still not in set anymore
- BSONObj cmdResponse;
- MockRemoteDBServer* node = replSet.getNode(hostToRemove);
- const MockRemoteDBServer::InstanceID id = node->getInstanceID();
- bool ok = node->runCommand(id, "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse);
- ASSERT(ok);
-
- ASSERT_EQUALS("n", cmdResponse["set"].str());
- ASSERT_EQUALS(10, cmdResponse["myState"].numberInt());
- }
+ const vector<string> secondaries = replSet.getSecondaries();
+ for (vector<string>::const_iterator iter = secondaries.begin(); iter != secondaries.end();
+ ++iter) {
+ ASSERT(replSet.getNode(*iter)->isRunning());
}
+}
- TEST(MockReplicaSetTest, KillNode) {
- MockReplicaSet replSet("n", 3);
- const string priHostName(replSet.getPrimary());
- replSet.kill(priHostName);
+TEST(MockReplicaSetTest, KillMultipleNode) {
+ MockReplicaSet replSet("n", 3);
- ASSERT(!replSet.getNode(priHostName)->isRunning());
+ const vector<string> secondaries = replSet.getSecondaries();
+ replSet.kill(replSet.getSecondaries());
- const vector<string> secondaries = replSet.getSecondaries();
- for (vector<string>::const_iterator iter = secondaries.begin();
- iter != secondaries.end(); ++iter) {
- ASSERT(replSet.getNode(*iter)->isRunning());
- }
+ for (vector<string>::const_iterator iter = secondaries.begin(); iter != secondaries.end();
+ ++iter) {
+ ASSERT(!replSet.getNode(*iter)->isRunning());
}
- TEST(MockReplicaSetTest, KillMultipleNode) {
- MockReplicaSet replSet("n", 3);
-
- const vector<string> secondaries = replSet.getSecondaries();
- replSet.kill(replSet.getSecondaries());
-
- for (vector<string>::const_iterator iter = secondaries.begin();
- iter != secondaries.end(); ++iter) {
- ASSERT(!replSet.getNode(*iter)->isRunning());
- }
-
- const string priHostName(replSet.getPrimary());
- ASSERT(replSet.getNode(priHostName)->isRunning());
- }
+ const string priHostName(replSet.getPrimary());
+ ASSERT(replSet.getNode(priHostName)->isRunning());
+}
}
-
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 1017be597e5..c58db812480 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -57,84 +57,89 @@
namespace NamespaceTests {
- using std::string;
+using std::string;
+
+const int MinExtentSize = 4096;
+
+namespace MissingFieldTests {
+
+/** A missing field is represented as null in a btree index. */
+class BtreeIndexMissingField {
+public:
+ void run() {
+ OperationContextImpl txn;
+ BSONObj spec(BSON("key" << BSON("a" << 1)));
+ ASSERT_EQUALS(jstNULL,
+ IndexLegacy::getMissingField(&txn, NULL, spec).firstElement().type());
+ }
+};
+
+/** A missing field is represented as null in a 2d index. */
+class TwoDIndexMissingField {
+public:
+ void run() {
+ OperationContextImpl txn;
+ BSONObj spec(BSON("key" << BSON("a"
+ << "2d")));
+ ASSERT_EQUALS(jstNULL,
+ IndexLegacy::getMissingField(&txn, NULL, spec).firstElement().type());
+ }
+};
+
+/** A missing field is represented with the hash of null in a hashed index. */
+class HashedIndexMissingField {
+public:
+ void run() {
+ OperationContextImpl txn;
+ BSONObj spec(BSON("key" << BSON("a"
+ << "hashed")));
+ BSONObj nullObj = BSON("a" << BSONNULL);
+
+ // Call getKeys on the nullObj.
+ BSONObjSet nullFieldKeySet;
+ ExpressionKeysPrivate::getHashKeys(nullObj, "a", 0, 0, false, &nullFieldKeySet);
+ BSONElement nullFieldFromKey = nullFieldKeySet.begin()->firstElement();
+
+ ASSERT_EQUALS(ExpressionKeysPrivate::makeSingleHashKey(nullObj.firstElement(), 0, 0),
+ nullFieldFromKey.Long());
+
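+ // The key generated for a missing field should equal the hash of an explicit null.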
+ BSONObj missingField = IndexLegacy::getMissingField(&txn, NULL, spec);
+ ASSERT_EQUALS(NumberLong, missingField.firstElement().type());
+ ASSERT_EQUALS(nullFieldFromKey, missingField.firstElement());
+ }
+};
- const int MinExtentSize = 4096;
-
- namespace MissingFieldTests {
-
- /** A missing field is represented as null in a btree index. */
- class BtreeIndexMissingField {
- public:
- void run() {
- OperationContextImpl txn;
- BSONObj spec( BSON("key" << BSON( "a" << 1 ) ));
- ASSERT_EQUALS(jstNULL, IndexLegacy::getMissingField(&txn, NULL,spec).firstElement().type());
- }
- };
-
- /** A missing field is represented as null in a 2d index. */
- class TwoDIndexMissingField {
- public:
- void run() {
- OperationContextImpl txn;
- BSONObj spec( BSON("key" << BSON( "a" << "2d" ) ));
- ASSERT_EQUALS(jstNULL, IndexLegacy::getMissingField(&txn, NULL,spec).firstElement().type());
- }
- };
-
- /** A missing field is represented with the hash of null in a hashed index. */
- class HashedIndexMissingField {
- public:
- void run() {
- OperationContextImpl txn;
- BSONObj spec( BSON("key" << BSON( "a" << "hashed" ) ));
- BSONObj nullObj = BSON( "a" << BSONNULL );
-
- // Call getKeys on the nullObj.
- BSONObjSet nullFieldKeySet;
- ExpressionKeysPrivate::getHashKeys(nullObj, "a", 0, 0, false, &nullFieldKeySet);
- BSONElement nullFieldFromKey = nullFieldKeySet.begin()->firstElement();
-
- ASSERT_EQUALS( ExpressionKeysPrivate::makeSingleHashKey( nullObj.firstElement(), 0, 0 ),
- nullFieldFromKey.Long() );
-
- BSONObj missingField = IndexLegacy::getMissingField(&txn, NULL,spec);
- ASSERT_EQUALS( NumberLong, missingField.firstElement().type() );
- ASSERT_EQUALS( nullFieldFromKey, missingField.firstElement());
- }
- };
-
- /**
- * A missing field is represented with the hash of null in a hashed index. This hash value
- * depends on the hash seed.
- */
- class HashedIndexMissingFieldAlternateSeed {
- public:
- void run() {
- OperationContextImpl txn;
- BSONObj spec( BSON("key" << BSON( "a" << "hashed" ) << "seed" << 0x5eed ));
- BSONObj nullObj = BSON( "a" << BSONNULL );
-
- BSONObjSet nullFieldKeySet;
- ExpressionKeysPrivate::getHashKeys(nullObj, "a", 0x5eed, 0, false, &nullFieldKeySet);
- BSONElement nullFieldFromKey = nullFieldKeySet.begin()->firstElement();
-
- ASSERT_EQUALS( ExpressionKeysPrivate::makeSingleHashKey( nullObj.firstElement(), 0x5eed, 0 ),
- nullFieldFromKey.Long() );
-
- // Ensure that getMissingField recognizes that the seed is different (and returns
- // the right key).
- BSONObj missingField = IndexLegacy::getMissingField(&txn, NULL,spec);
- ASSERT_EQUALS( NumberLong, missingField.firstElement().type());
- ASSERT_EQUALS( nullFieldFromKey, missingField.firstElement());
- }
- };
-
- } // namespace MissingFieldTests
-
- namespace NamespaceDetailsTests {
-#if 0 // SERVER-13640
+/**
+ * A missing field is represented with the hash of null in a hashed index. This hash value
+ * depends on the hash seed.
+ */
+class HashedIndexMissingFieldAlternateSeed {
+public:
+ void run() {
+ OperationContextImpl txn;
+ BSONObj spec(BSON("key" << BSON("a"
+ << "hashed") << "seed" << 0x5eed));
+ BSONObj nullObj = BSON("a" << BSONNULL);
+
+ BSONObjSet nullFieldKeySet;
+ ExpressionKeysPrivate::getHashKeys(nullObj, "a", 0x5eed, 0, false, &nullFieldKeySet);
+ BSONElement nullFieldFromKey = nullFieldKeySet.begin()->firstElement();
+
+ ASSERT_EQUALS(ExpressionKeysPrivate::makeSingleHashKey(nullObj.firstElement(), 0x5eed, 0),
+ nullFieldFromKey.Long());
+
+ // Ensure that getMissingField recognizes that the seed is different (and returns
+ // the right key).
+ BSONObj missingField = IndexLegacy::getMissingField(&txn, NULL, spec);
+ ASSERT_EQUALS(NumberLong, missingField.firstElement().type());
+ ASSERT_EQUALS(nullFieldFromKey, missingField.firstElement());
+ }
+};
+
+} // namespace MissingFieldTests
+
+namespace NamespaceDetailsTests {
+#if 0 // SERVER-13640
class Base {
const char *ns_;
@@ -424,8 +429,8 @@ namespace NamespaceTests {
pass(0);
}
};
-#endif // SERVER-13640
-#if 0 // XXXXXX - once RecordStore is clean, we can put this back
+#endif // SERVER-13640
+#if 0 // XXXXXX - once RecordStore is clean, we can put this back
class Migrate : public Base {
public:
void run() {
@@ -459,26 +464,26 @@ namespace NamespaceTests {
};
#endif
- // This isn't a particularly useful test, and because it doesn't clean up
- // after itself, /tmp/unittest needs to be cleared after running.
- // class BigCollection : public Base {
- // public:
- // BigCollection() : Base( "NamespaceDetailsTests_BigCollection" ) {}
- // void run() {
- // create();
- // ASSERT_EQUALS( 2, nExtents() );
- // }
- // private:
- // virtual string spec() const {
- // // NOTE 256 added to size in _userCreateNS()
- // long long big = DataFile::maxSize() - DataFileHeader::HeaderSize;
- // stringstream ss;
- // ss << "{\"capped\":true,\"size\":" << big << "}";
- // return ss.str();
- // }
- // };
-
-#if 0 // SERVER-13640
+// This isn't a particularly useful test, and because it doesn't clean up
+// after itself, /tmp/unittest needs to be cleared after running.
+// class BigCollection : public Base {
+// public:
+// BigCollection() : Base( "NamespaceDetailsTests_BigCollection" ) {}
+// void run() {
+// create();
+// ASSERT_EQUALS( 2, nExtents() );
+// }
+// private:
+// virtual string spec() const {
+// // NOTE 256 added to size in _userCreateNS()
+// long long big = DataFile::maxSize() - DataFileHeader::HeaderSize;
+// stringstream ss;
+// ss << "{\"capped\":true,\"size\":" << big << "}";
+// return ss.str();
+// }
+// };
+
+#if 0 // SERVER-13640
class SwapIndexEntriesTest : public Base {
public:
void run() {
@@ -514,134 +519,132 @@ namespace NamespaceTests {
}
virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
};
-#endif // SERVER-13640
- } // namespace NamespaceDetailsTests
-
- namespace DatabaseTests {
-
- class RollbackCreateCollection {
- public:
- void run() {
- const string dbName = "rollback_create_collection";
- const string committedName = dbName + ".committed";
- const string rolledBackName = dbName + ".rolled_back";
-
- OperationContextImpl txn;
-
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), dbName, MODE_X);
-
- bool justCreated;
- Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
- ASSERT(justCreated);
-
- Collection* committedColl;
- {
- WriteUnitOfWork wunit(&txn);
- ASSERT_FALSE(db->getCollection(committedName));
- committedColl = db->createCollection(&txn, committedName);
- ASSERT_EQUALS(db->getCollection(committedName), committedColl);
- wunit.commit();
- }
-
- ASSERT_EQUALS(db->getCollection(committedName), committedColl);
-
- {
- WriteUnitOfWork wunit(&txn);
- ASSERT_FALSE(db->getCollection(rolledBackName));
- Collection* rolledBackColl = db->createCollection(&txn, rolledBackName);
- ASSERT_EQUALS(db->getCollection(rolledBackName), rolledBackColl);
- // not committing so creation should be rolled back
- }
-
- // The rolledBackCollection creation should have been rolled back
- ASSERT_FALSE(db->getCollection(rolledBackName));
-
- // The committedCollection should not have been affected by the rollback. Holders
- // of the original Collection pointer should still be valid.
- ASSERT_EQUALS(db->getCollection(committedName), committedColl);
- }
- };
-
- class RollbackDropCollection {
- public:
- void run() {
- const string dbName = "rollback_drop_collection";
- const string droppedName = dbName + ".dropped";
- const string rolledBackName = dbName + ".rolled_back";
-
- OperationContextImpl txn;
-
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), dbName, MODE_X);
-
- bool justCreated;
- Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
- ASSERT(justCreated);
-
- {
- WriteUnitOfWork wunit(&txn);
- ASSERT_FALSE(db->getCollection(droppedName));
- Collection* droppedColl;
- droppedColl = db->createCollection(&txn, droppedName);
- ASSERT_EQUALS(db->getCollection(droppedName), droppedColl);
- db->dropCollection(&txn, droppedName);
- wunit.commit();
- }
-
- // Should have been really dropped
- ASSERT_FALSE(db->getCollection(droppedName));
+#endif // SERVER-13640
+} // namespace NamespaceDetailsTests
+
+namespace DatabaseTests {
+
+class RollbackCreateCollection {
+public:
+ void run() {
+ const string dbName = "rollback_create_collection";
+ const string committedName = dbName + ".committed";
+ const string rolledBackName = dbName + ".rolled_back";
+
+ OperationContextImpl txn;
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock lk(txn.lockState(), dbName, MODE_X);
+
+ bool justCreated;
+ Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
+ ASSERT(justCreated);
+
+ Collection* committedColl;
+ {
+ WriteUnitOfWork wunit(&txn);
+ ASSERT_FALSE(db->getCollection(committedName));
+ committedColl = db->createCollection(&txn, committedName);
+ ASSERT_EQUALS(db->getCollection(committedName), committedColl);
+ wunit.commit();
+ }
- {
- WriteUnitOfWork wunit(&txn);
- ASSERT_FALSE(db->getCollection(rolledBackName));
- Collection* rolledBackColl = db->createCollection(&txn, rolledBackName);
- wunit.commit();
- ASSERT_EQUALS(db->getCollection(rolledBackName), rolledBackColl);
- db->dropCollection(&txn, rolledBackName);
- // not committing so dropping should be rolled back
- }
+ ASSERT_EQUALS(db->getCollection(committedName), committedColl);
- // The rolledBackCollection dropping should have been rolled back.
- // Original Collection pointers are no longer valid.
- ASSERT(db->getCollection(rolledBackName));
+ {
+ WriteUnitOfWork wunit(&txn);
+ ASSERT_FALSE(db->getCollection(rolledBackName));
+ Collection* rolledBackColl = db->createCollection(&txn, rolledBackName);
+ ASSERT_EQUALS(db->getCollection(rolledBackName), rolledBackColl);
+ // not committing so creation should be rolled back
+ }
- // The droppedCollection should not have been restored by the rollback.
- ASSERT_FALSE(db->getCollection(droppedName));
- }
- };
- } // namespace DatabaseTests
+ // The rolledBackCollection creation should have been rolled back
+ ASSERT_FALSE(db->getCollection(rolledBackName));
+
+ // The committedCollection should not have been affected by the rollback. Holders
+ // of the original Collection pointer should still be valid.
+ ASSERT_EQUALS(db->getCollection(committedName), committedColl);
+ }
+};
+
+class RollbackDropCollection {
+public:
+ void run() {
+ const string dbName = "rollback_drop_collection";
+ const string droppedName = dbName + ".dropped";
+ const string rolledBackName = dbName + ".rolled_back";
+
+ OperationContextImpl txn;
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock lk(txn.lockState(), dbName, MODE_X);
+
+ bool justCreated;
+ Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
+ ASSERT(justCreated);
+
+ {
+ WriteUnitOfWork wunit(&txn);
+ ASSERT_FALSE(db->getCollection(droppedName));
+ Collection* droppedColl;
+ droppedColl = db->createCollection(&txn, droppedName);
+ ASSERT_EQUALS(db->getCollection(droppedName), droppedColl);
+ db->dropCollection(&txn, droppedName);
+ wunit.commit();
+ }
- class All : public Suite {
- public:
- All() : Suite( "namespace" ) {
+ // Should have been really dropped
+ ASSERT_FALSE(db->getCollection(droppedName));
+
+ {
+ WriteUnitOfWork wunit(&txn);
+ ASSERT_FALSE(db->getCollection(rolledBackName));
+ Collection* rolledBackColl = db->createCollection(&txn, rolledBackName);
+ wunit.commit();
+ ASSERT_EQUALS(db->getCollection(rolledBackName), rolledBackColl);
+ db->dropCollection(&txn, rolledBackName);
+ // not committing so dropping should be rolled back
}
- void setupTests() {
- add< MissingFieldTests::BtreeIndexMissingField >();
- add< MissingFieldTests::TwoDIndexMissingField >();
- add< MissingFieldTests::HashedIndexMissingField >();
- add< MissingFieldTests::HashedIndexMissingFieldAlternateSeed >();
-
- // add< NamespaceDetailsTests::Create >();
- //add< NamespaceDetailsTests::SingleAlloc >();
- //add< NamespaceDetailsTests::Realloc >();
- //add< NamespaceDetailsTests::AllocCappedNotQuantized >();
- //add< NamespaceDetailsTests::TwoExtent >();
- //add< NamespaceDetailsTests::TruncateCapped >();
- //add< NamespaceDetailsTests::Migrate >();
- //add< NamespaceDetailsTests::SwapIndexEntriesTest >();
- // add< NamespaceDetailsTests::BigCollection >();
+ // The rolledBackCollection dropping should have been rolled back.
+ // Original Collection pointers are no longer valid.
+ ASSERT(db->getCollection(rolledBackName));
+
+ // The droppedCollection should not have been restored by the rollback.
+ ASSERT_FALSE(db->getCollection(droppedName));
+ }
+};
+} // namespace DatabaseTests
+
+class All : public Suite {
+public:
+ All() : Suite("namespace") {}
+
+ void setupTests() {
+ add<MissingFieldTests::BtreeIndexMissingField>();
+ add<MissingFieldTests::TwoDIndexMissingField>();
+ add<MissingFieldTests::HashedIndexMissingField>();
+ add<MissingFieldTests::HashedIndexMissingFieldAlternateSeed>();
+
+// add< NamespaceDetailsTests::Create >();
+// add< NamespaceDetailsTests::SingleAlloc >();
+// add< NamespaceDetailsTests::Realloc >();
+// add< NamespaceDetailsTests::AllocCappedNotQuantized >();
+// add< NamespaceDetailsTests::TwoExtent >();
+// add< NamespaceDetailsTests::TruncateCapped >();
+// add< NamespaceDetailsTests::Migrate >();
+// add< NamespaceDetailsTests::SwapIndexEntriesTest >();
+// add< NamespaceDetailsTests::BigCollection >();
#if 0
// until ROLLBACK_ENABLED
add< DatabaseTests::RollbackCreateCollection >();
add< DatabaseTests::RollbackDropCollection >();
#endif
- }
- };
-
- SuiteInstance<All> myall;
+ }
+};
-} // namespace NamespaceTests
+SuiteInstance<All> myall;
+} // namespace NamespaceTests
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index ff907f964e9..4714331bc6d 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -37,348 +37,394 @@
namespace OplogStartTests {
- using boost::scoped_ptr;
- using std::string;
-
- class Base {
- public:
- Base() : _txn(),
- _scopedXact(&_txn, MODE_X),
- _lk(_txn.lockState()),
- _wunit(&_txn),
- _context(&_txn, ns()),
- _client(&_txn) {
-
- Collection* c = _context.db()->getCollection(ns());
- if (!c) {
- c = _context.db()->createCollection(&_txn, ns());
- }
- ASSERT(c->getIndexCatalog()->haveIdIndex(&_txn));
+using boost::scoped_ptr;
+using std::string;
+
+class Base {
+public:
+ Base()
+ : _txn(),
+ _scopedXact(&_txn, MODE_X),
+ _lk(_txn.lockState()),
+ _wunit(&_txn),
+ _context(&_txn, ns()),
+ _client(&_txn) {
+ Collection* c = _context.db()->getCollection(ns());
+ if (!c) {
+ c = _context.db()->createCollection(&_txn, ns());
}
+ ASSERT(c->getIndexCatalog()->haveIdIndex(&_txn));
+ }
+
+ ~Base() {
+ client()->dropCollection(ns());
+ _wunit.commit();
+
+ // The OplogStart stage is not allowed to outlive its RecoveryUnit.
+ _stage.reset();
+ }
+
+protected:
+ static const char* ns() {
+ return "unittests.oplogstarttests";
+ }
+ static const char* dbname() {
+ return "unittests";
+ }
+ static const char* collname() {
+ return "oplogstarttests";
+ }
+
+ Collection* collection() {
+ return _context.db()->getCollection(ns());
+ }
+
+ DBDirectClient* client() {
+ return &_client;
+ }
+
+ void setupFromQuery(const BSONObj& query) {
+ CanonicalQuery* cq;
+ Status s = CanonicalQuery::canonicalize(ns(), query, &cq);
+ ASSERT(s.isOK());
+ _cq.reset(cq);
+ _oplogws.reset(new WorkingSet());
+ _stage.reset(new OplogStart(&_txn, collection(), _cq->root(), _oplogws.get()));
+ }
+
+ void assertWorkingSetMemberHasId(WorkingSetID id, int expectedId) {
+ WorkingSetMember* member = _oplogws->get(id);
+ BSONElement idEl = member->obj.value()["_id"];
+ ASSERT(!idEl.eoo());
+ ASSERT(idEl.isNumber());
+ ASSERT_EQUALS(idEl.numberInt(), expectedId);
+ }
+
+ scoped_ptr<CanonicalQuery> _cq;
+ scoped_ptr<WorkingSet> _oplogws;
+ scoped_ptr<OplogStart> _stage;
+
+private:
+ // The declaration order of these members matters: it determines their order of destruction.
+ OperationContextImpl _txn;
+ ScopedTransaction _scopedXact;
+ Lock::GlobalWrite _lk;
+ WriteUnitOfWork _wunit;
+ Client::Context _context;
+
+ DBDirectClient _client;
+};
- ~Base() {
- client()->dropCollection(ns());
- _wunit.commit();
- // The OplogStart stage is not allowed to outlive it's RecoveryUnit.
- _stage.reset();
+/**
+ * When the ts is newer than the oldest document, the OplogStart
+ * stage should find the oldest document using a backwards collection
+ * scan.
+ */
+class OplogStartIsOldest : public Base {
+public:
+ void run() {
+ for (int i = 0; i < 10; ++i) {
+ client()->insert(ns(), BSON("_id" << i << "ts" << i));
}
- protected:
- static const char *ns() {
- return "unittests.oplogstarttests";
- }
- static const char *dbname() {
- return "unittests";
- }
- static const char *collname() {
- return "oplogstarttests";
- }
+ setupFromQuery(BSON("ts" << BSON("$gte" << 10)));
- Collection* collection() {
- return _context.db()->getCollection( ns() );
- }
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ // collection scan needs to be initialized
+ ASSERT_EQUALS(_stage->work(&id), PlanStage::NEED_TIME);
+ // finds starting record
+ ASSERT_EQUALS(_stage->work(&id), PlanStage::ADVANCED);
+ ASSERT(_stage->isBackwardsScanning());
- DBDirectClient* client() { return &_client; }
+ assertWorkingSetMemberHasId(id, 9);
+ }
+};
- void setupFromQuery(const BSONObj& query) {
- CanonicalQuery* cq;
- Status s = CanonicalQuery::canonicalize(ns(), query, &cq);
- ASSERT(s.isOK());
- _cq.reset(cq);
- _oplogws.reset(new WorkingSet());
- _stage.reset(new OplogStart(&_txn, collection(), _cq->root(), _oplogws.get()));
+/**
+ * Find the starting oplog record by scanning backwards
+ * all the way to the beginning.
+ */
+class OplogStartIsNewest : public Base {
+public:
+ void run() {
+ for (int i = 0; i < 10; ++i) {
+ client()->insert(ns(), BSON("_id" << i << "ts" << i));
}
- void assertWorkingSetMemberHasId(WorkingSetID id, int expectedId) {
- WorkingSetMember* member = _oplogws->get(id);
- BSONElement idEl = member->obj.value()["_id"];
- ASSERT(!idEl.eoo());
- ASSERT(idEl.isNumber());
- ASSERT_EQUALS(idEl.numberInt(), expectedId);
- }
+ setupFromQuery(BSON("ts" << BSON("$gte" << 1)));
- scoped_ptr<CanonicalQuery> _cq;
- scoped_ptr<WorkingSet> _oplogws;
- scoped_ptr<OplogStart> _stage;
-
- private:
- // The order of these is important in order to ensure order of destruction
- OperationContextImpl _txn;
- ScopedTransaction _scopedXact;
- Lock::GlobalWrite _lk;
- WriteUnitOfWork _wunit;
- Client::Context _context;
-
- DBDirectClient _client;
- };
-
-
- /**
- * When the ts is newer than the oldest document, the OplogStart
- * stage should find the oldest document using a backwards collection
- * scan.
- */
- class OplogStartIsOldest : public Base {
- public:
- void run() {
- for(int i = 0; i < 10; ++i) {
- client()->insert(ns(), BSON( "_id" << i << "ts" << i ));
- }
-
- setupFromQuery(BSON( "ts" << BSON( "$gte" << 10 )));
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- // collection scan needs to be initialized
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ // collection scan needs to be initialized
+ ASSERT_EQUALS(_stage->work(&id), PlanStage::NEED_TIME);
+ // full collection scan back to the first oplog record
+ for (int i = 0; i < 9; ++i) {
ASSERT_EQUALS(_stage->work(&id), PlanStage::NEED_TIME);
- // finds starting record
- ASSERT_EQUALS(_stage->work(&id), PlanStage::ADVANCED);
ASSERT(_stage->isBackwardsScanning());
+ }
+ ASSERT_EQUALS(_stage->work(&id), PlanStage::ADVANCED);
+
+ assertWorkingSetMemberHasId(id, 0);
+ }
+};
- assertWorkingSetMemberHasId(id, 9);
+/**
+ * Find the starting oplog record by hopping to the
+ * beginning of the extent.
+ */
+class OplogStartIsNewestExtentHop : public Base {
+public:
+ void run() {
+ for (int i = 0; i < 10; ++i) {
+ client()->insert(ns(), BSON("_id" << i << "ts" << i));
}
- };
-
- /**
- * Find the starting oplog record by scanning backwards
- * all the way to the beginning.
- */
- class OplogStartIsNewest : public Base {
- public:
- void run() {
- for(int i = 0; i < 10; ++i) {
- client()->insert(ns(), BSON( "_id" << i << "ts" << i ));
- }
-
- setupFromQuery(BSON( "ts" << BSON( "$gte" << 1 )));
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- // collection scan needs to be initialized
+
+ setupFromQuery(BSON("ts" << BSON("$gte" << 1)));
+
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ // ensure that we go into extent hopping mode immediately
+ _stage->setBackwardsScanTime(0);
+
+ // We immediately switch to extent hopping mode, and
+ // should find the beginning of the extent
+ ASSERT_EQUALS(_stage->work(&id), PlanStage::ADVANCED);
+ ASSERT(_stage->isExtentHopping());
+
+ assertWorkingSetMemberHasId(id, 0);
+ }
+};
+
+class SizedExtentHopBase : public Base {
+public:
+ SizedExtentHopBase() {
+ client()->dropCollection(ns());
+ }
+ virtual ~SizedExtentHopBase() {
+ client()->dropCollection(ns());
+ }
+
+ void run() {
+ buildCollection();
+
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ setupFromQuery(BSON("ts" << BSON("$gte" << tsGte())));
+
+ // ensure that we go into extent hopping mode immediately
+ _stage->setBackwardsScanTime(0);
+
+ // hop back extent by extent
+ for (int i = 0; i < numHops(); i++) {
ASSERT_EQUALS(_stage->work(&id), PlanStage::NEED_TIME);
- // full collection scan back to the first oplog record
- for (int i = 0; i < 9; ++i) {
- ASSERT_EQUALS(_stage->work(&id), PlanStage::NEED_TIME);
- ASSERT(_stage->isBackwardsScanning());
- }
- ASSERT_EQUALS(_stage->work(&id), PlanStage::ADVANCED);
-
- assertWorkingSetMemberHasId(id, 0);
- }
- };
-
- /**
- * Find the starting oplog record by hopping to the
- * beginning of the extent.
- */
- class OplogStartIsNewestExtentHop : public Base {
- public:
- void run() {
- for(int i = 0; i < 10; ++i) {
- client()->insert(ns(), BSON( "_id" << i << "ts" << i));
- }
-
- setupFromQuery(BSON( "ts" << BSON( "$gte" << 1 )));
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- // ensure that we go into extent hopping mode immediately
- _stage->setBackwardsScanTime(0);
-
- // We immediately switch to extent hopping mode, and
- // should find the beginning of the extent
- ASSERT_EQUALS(_stage->work(&id), PlanStage::ADVANCED);
ASSERT(_stage->isExtentHopping());
-
- assertWorkingSetMemberHasId(id, 0);
}
- };
+ // find the right loc without hopping again
+ ASSERT_EQUALS(_stage->work(&id), finalState());
- class SizedExtentHopBase : public Base {
- public:
- SizedExtentHopBase() {
- client()->dropCollection(ns());
+ int startDocId = tsGte() - 1;
+ if (startDocId >= 0) {
+ assertWorkingSetMemberHasId(id, startDocId);
}
- virtual ~SizedExtentHopBase() {
- client()->dropCollection(ns());
+ }
+
+protected:
+ void buildCollection() {
+ BSONObj info;
+ // Create a collection with specified extent sizes
+ BSONObj command = BSON("create" << collname() << "capped" << true << "$nExtents"
+ << extentSizes() << "autoIndexId" << false);
+ ASSERT(client()->runCommand(dbname(), command, info));
+
+ // Populate documents.
+ for (int i = 0; i < numDocs(); ++i) {
+ client()->insert(ns(), BSON("_id" << i << "ts" << i << "payload" << payload8k()));
}
+ }
+
+ static string payload8k() {
+ return string(8 * 1024, 'a');
+ }
+ /** An extent of this size is too small to contain one document containing payload8k(). */
+ static int tooSmall() {
+ return 1 * 1024;
+ }
+ /** An extent of this size fits one document. */
+ static int fitsOne() {
+ return 10 * 1024;
+ }
+ /** An extent of this size fits many documents. */
+ static int fitsMany() {
+ return 50 * 1024;
+ }
+
+ // to be defined by subclasses
+ virtual BSONArray extentSizes() const = 0;
+ virtual int numDocs() const = 0;
+ virtual int numHops() const = 0;
+ virtual PlanStage::StageState finalState() const {
+ return PlanStage::ADVANCED;
+ }
+ virtual int tsGte() const {
+ return 1;
+ }
+};
- void run() {
- buildCollection();
+/**
+ * Test hopping over a single empty extent.
+ *
+ * Collection structure:
+ *
+ * [--- extent 0 --] [ ext 1 ] [--- extent 2 ---]
+ * [ {_id: 0} ] [<empty>] [ {_id: 1} ]
+ */
+class OplogStartOneEmptyExtent : public SizedExtentHopBase {
+ virtual int numDocs() const {
+ return 2;
+ }
+ virtual int numHops() const {
+ return 1;
+ }
+ virtual BSONArray extentSizes() const {
+ return BSON_ARRAY(fitsOne() << tooSmall() << fitsOne());
+ }
+};
- WorkingSetID id = WorkingSet::INVALID_ID;
- setupFromQuery(BSON( "ts" << BSON( "$gte" << tsGte() )));
+/**
+ * Test hopping over two consecutive empty extents.
+ *
+ * Collection structure:
+ *
+ * [--- extent 0 --] [ ext 1 ] [ ext 2 ] [--- extent 3 ---]
+ * [ {_id: 0} ] [<empty>] [<empty>] [ {_id: 1} ]
+ */
+class OplogStartTwoEmptyExtents : public SizedExtentHopBase {
+ virtual int numDocs() const {
+ return 2;
+ }
+ virtual int numHops() const {
+ return 1;
+ }
+ virtual BSONArray extentSizes() const {
+ return BSON_ARRAY(fitsOne() << tooSmall() << tooSmall() << fitsOne());
+ }
+};
- // ensure that we go into extent hopping mode immediately
- _stage->setBackwardsScanTime(0);
+/**
+ * Two extents, each filled with several documents. This
+ * should require us to make just a single extent hop.
+ */
+class OplogStartTwoFullExtents : public SizedExtentHopBase {
+ virtual int numDocs() const {
+ return 10;
+ }
+ virtual int numHops() const {
+ return 1;
+ }
+ virtual BSONArray extentSizes() const {
+ return BSON_ARRAY(fitsMany() << fitsMany());
+ }
+};
- // hop back extent by extent
- for (int i = 0; i < numHops(); i++) {
- ASSERT_EQUALS(_stage->work(&id), PlanStage::NEED_TIME);
- ASSERT(_stage->isExtentHopping());
- }
- // find the right loc without hopping again
- ASSERT_EQUALS(_stage->work(&id), finalState());
+/**
+ * Four extents in total. Three are populated with multiple
+ * documents, but one of the middle extents is empty. This
+ * should require two extent hops.
+ */
+class OplogStartThreeFullOneEmpty : public SizedExtentHopBase {
+ virtual int numDocs() const {
+ return 14;
+ }
+ virtual int numHops() const {
+ return 2;
+ }
+ virtual BSONArray extentSizes() const {
+ return BSON_ARRAY(fitsMany() << fitsMany() << tooSmall() << fitsMany());
+ }
+};
- int startDocId = tsGte() - 1;
- if (startDocId >= 0) {
- assertWorkingSetMemberHasId(id, startDocId);
- }
- }
+/**
+ * Test that extent hopping mode works properly in the
+ * special case of one extent.
+ */
+class OplogStartOneFullExtent : public SizedExtentHopBase {
+ virtual int numDocs() const {
+ return 4;
+ }
+ virtual int numHops() const {
+ return 0;
+ }
+ virtual BSONArray extentSizes() const {
+ return BSON_ARRAY(fitsMany());
+ }
+};
- protected:
- void buildCollection() {
- BSONObj info;
- // Create a collection with specified extent sizes
- BSONObj command = BSON( "create" << collname() << "capped" << true <<
- "$nExtents" << extentSizes() << "autoIndexId" << false );
- ASSERT(client()->runCommand(dbname(), command, info));
-
- // Populate documents.
- for(int i = 0; i < numDocs(); ++i) {
- client()->insert(ns(), BSON( "_id" << i << "ts" << i << "payload" << payload8k() ));
- }
- }
+/**
+ * Collection structure:
+ *
+ * [ ext 0 ] [--- extent 1 --] [--- extent 2 ---]
+ * [<empty>] [ {_id: 0} ] [ {_id: 1} ]
+ */
+class OplogStartFirstExtentEmpty : public SizedExtentHopBase {
+ virtual int numDocs() const {
+ return 2;
+ }
+ virtual int numHops() const {
+ return 1;
+ }
+ virtual BSONArray extentSizes() const {
+ return BSON_ARRAY(tooSmall() << fitsOne() << fitsOne());
+ }
+};
- static string payload8k() { return string(8*1024, 'a'); }
- /** An extent of this size is too small to contain one document containing payload8k(). */
- static int tooSmall() { return 1*1024; }
- /** An extent of this size fits one document. */
- static int fitsOne() { return 10*1024; }
- /** An extent of this size fits many documents. */
- static int fitsMany() { return 50*1024; }
-
- // to be defined by subclasses
- virtual BSONArray extentSizes() const = 0;
- virtual int numDocs() const = 0;
- virtual int numHops() const = 0;
- virtual PlanStage::StageState finalState() const { return PlanStage::ADVANCED; }
- virtual int tsGte() const { return 1; }
- };
-
- /**
- * Test hopping over a single empty extent.
- *
- * Collection structure:
- *
- * [--- extent 0 --] [ ext 1 ] [--- extent 2 ---]
- * [ {_id: 0} ] [<empty>] [ {_id: 1} ]
- */
- class OplogStartOneEmptyExtent : public SizedExtentHopBase {
- virtual int numDocs() const { return 2; }
- virtual int numHops() const { return 1; }
- virtual BSONArray extentSizes() const {
- return BSON_ARRAY( fitsOne() << tooSmall() << fitsOne() );
- }
- };
-
- /**
- * Test hopping over two consecutive empty extents.
- *
- * Collection structure:
- *
- * [--- extent 0 --] [ ext 1 ] [ ext 2 ] [--- extent 3 ---]
- * [ {_id: 0} ] [<empty>] [<empty>] [ {_id: 1} ]
- */
- class OplogStartTwoEmptyExtents : public SizedExtentHopBase {
- virtual int numDocs() const { return 2; }
- virtual int numHops() const { return 1; }
- virtual BSONArray extentSizes() const {
- return BSON_ARRAY( fitsOne() << tooSmall() << tooSmall() << fitsOne() );
- }
- };
-
- /**
- * Two extents, each filled with several documents. This
- * should require us to make just a single extent hop.
- */
- class OplogStartTwoFullExtents : public SizedExtentHopBase {
- virtual int numDocs() const { return 10; }
- virtual int numHops() const { return 1; }
- virtual BSONArray extentSizes() const {
- return BSON_ARRAY( fitsMany() << fitsMany() );
- }
- };
-
- /**
- * Four extents in total. Three are populated with multiple
- * documents, but one of the middle extents is empty. This
- * should require two extent hops.
- */
- class OplogStartThreeFullOneEmpty : public SizedExtentHopBase {
- virtual int numDocs() const { return 14; }
- virtual int numHops() const { return 2; }
- virtual BSONArray extentSizes() const {
- return BSON_ARRAY( fitsMany() << fitsMany() << tooSmall() << fitsMany() );
- }
- };
-
- /**
- * Test that extent hopping mode works properly in the
- * special case of one extent.
- */
- class OplogStartOneFullExtent : public SizedExtentHopBase {
- virtual int numDocs() const { return 4; }
- virtual int numHops() const { return 0; }
- virtual BSONArray extentSizes() const {
- return BSON_ARRAY( fitsMany() );
- }
- };
-
- /**
- * Collection structure:
- *
- * [ ext 0 ] [--- extent 1 --] [--- extent 2 ---]
- * [<empty>] [ {_id: 0} ] [ {_id: 1} ]
- */
- class OplogStartFirstExtentEmpty : public SizedExtentHopBase {
- virtual int numDocs() const { return 2; }
- virtual int numHops() const { return 1; }
- virtual BSONArray extentSizes() const {
- return BSON_ARRAY( tooSmall() << fitsOne() << fitsOne() );
- }
- };
-
- /**
- * Find that we need to start from the very beginning of
- * the collection (the EOF case), after extent hopping
- * to the beginning.
- *
- * This requires two hops: one between the two extents,
- * and one to hop back to the "null extent" which precedes
- * the first extent.
- */
- class OplogStartEOF : public SizedExtentHopBase {
- virtual int numDocs() const { return 2; }
- virtual int numHops() const { return 2; }
- virtual BSONArray extentSizes() const {
- return BSON_ARRAY( fitsOne() << fitsOne() );
- }
- virtual PlanStage::StageState finalState() const { return PlanStage::IS_EOF; }
- virtual int tsGte() const { return 0; }
- };
-
- class All : public Suite {
- public:
- All() : Suite("oplogstart") { }
-
- void setupTests() {
- add< OplogStartIsOldest >();
- add< OplogStartIsNewest >();
-
- // These tests rely on extent allocation details specific to mmapv1.
- // TODO figure out a way to generically test this.
- if (getGlobalEnvironment()->getGlobalStorageEngine()->isMmapV1()) {
- add< OplogStartIsNewestExtentHop >();
- add< OplogStartOneEmptyExtent >();
- add< OplogStartTwoEmptyExtents >();
- add< OplogStartTwoFullExtents >();
- add< OplogStartThreeFullOneEmpty >();
- add< OplogStartOneFullExtent >();
- add< OplogStartFirstExtentEmpty >();
- add< OplogStartEOF >();
- }
+/**
+ * Find that we need to start from the very beginning of
+ * the collection (the EOF case), after extent hopping
+ * to the beginning.
+ *
+ * This requires two hops: one between the two extents,
+ * and one to hop back to the "null extent" which precedes
+ * the first extent.
+ */
+class OplogStartEOF : public SizedExtentHopBase {
+ virtual int numDocs() const {
+ return 2;
+ }
+ virtual int numHops() const {
+ return 2;
+ }
+ virtual BSONArray extentSizes() const {
+ return BSON_ARRAY(fitsOne() << fitsOne());
+ }
+ virtual PlanStage::StageState finalState() const {
+ return PlanStage::IS_EOF;
+ }
+ virtual int tsGte() const {
+ return 0;
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("oplogstart") {}
+
+ void setupTests() {
+ add<OplogStartIsOldest>();
+ add<OplogStartIsNewest>();
+
+ // These tests rely on extent allocation details specific to mmapv1.
+ // TODO figure out a way to generically test this.
+ if (getGlobalEnvironment()->getGlobalStorageEngine()->isMmapV1()) {
+ add<OplogStartIsNewestExtentHop>();
+ add<OplogStartOneEmptyExtent>();
+ add<OplogStartTwoEmptyExtents>();
+ add<OplogStartTwoFullExtents>();
+ add<OplogStartThreeFullOneEmpty>();
+ add<OplogStartOneFullExtent>();
+ add<OplogStartFirstExtentEmpty>();
+ add<OplogStartEOF>();
}
- };
+ }
+};
- SuiteInstance<All> oplogStart;
+SuiteInstance<All> oplogStart;
-} // namespace OplogStartTests
+} // namespace OplogStartTests
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index fbf557e9f40..152db001399 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -40,133 +40,128 @@
namespace PdfileTests {
- namespace Insert {
- class Base {
- public:
- Base() : _scopedXact(&_txn, MODE_X),
- _lk(_txn.lockState()),
- _context(&_txn, ns()) {
-
- }
-
- virtual ~Base() {
- if ( !collection() )
- return;
- WriteUnitOfWork wunit(&_txn);
- _context.db()->dropCollection( &_txn, ns() );
- wunit.commit();
- }
-
- protected:
- const char *ns() {
- return "unittests.pdfiletests.Insert";
- }
- Collection* collection() {
- return _context.db()->getCollection( ns() );
- }
-
- OperationContextImpl _txn;
- ScopedTransaction _scopedXact;
- Lock::GlobalWrite _lk;
- Client::Context _context;
- };
-
- class InsertNoId : public Base {
- public:
- void run() {
- WriteUnitOfWork wunit(&_txn);
- BSONObj x = BSON( "x" << 1 );
- ASSERT( x["_id"].type() == 0 );
- Collection* collection = _context.db()->getOrCreateCollection( &_txn, ns() );
- StatusWith<RecordId> dl = collection->insertDocument( &_txn, x, true );
- ASSERT( !dl.isOK() );
-
- StatusWith<BSONObj> fixed = fixDocumentForInsert( x );
- ASSERT( fixed.isOK() );
- x = fixed.getValue();
- ASSERT( x["_id"].type() == jstOID );
- dl = collection->insertDocument( &_txn, x, true );
- ASSERT( dl.isOK() );
- wunit.commit();
- }
- };
-
- class UpdateDate : public Base {
- public:
- void run() {
- BSONObjBuilder b;
- b.appendTimestamp( "a" );
- b.append( "_id", 1 );
- BSONObj o = b.done();
-
- BSONObj fixed = fixDocumentForInsert( o ).getValue();
- ASSERT_EQUALS( 2, fixed.nFields() );
- ASSERT( fixed.firstElement().fieldNameStringData() == "_id" );
- ASSERT( fixed.firstElement().number() == 1 );
-
- BSONElement a = fixed["a"];
- ASSERT( o["a"].type() == Timestamp );
- ASSERT( o["a"].timestampValue() == 0 );
- ASSERT( a.type() == Timestamp );
- ASSERT( a.timestampValue() > 0 );
- }
- };
-
- class UpdateDate2 : public Base {
- public:
- void run() {
- BSONObj o;
- {
- BSONObjBuilder b;
- b.appendTimestamp( "a" );
- b.appendTimestamp( "b" );
- b.append( "_id", 1 );
- o = b.obj();
- }
-
- BSONObj fixed = fixDocumentForInsert( o ).getValue();
- ASSERT_EQUALS( 3, fixed.nFields() );
- ASSERT( fixed.firstElement().fieldNameStringData() == "_id" );
- ASSERT( fixed.firstElement().number() == 1 );
-
- BSONElement a = fixed["a"];
- ASSERT( o["a"].type() == Timestamp );
- ASSERT( o["a"].timestampValue() == 0 );
- ASSERT( a.type() == Timestamp );
- ASSERT( a.timestampValue() > 0 );
-
- BSONElement b = fixed["b"];
- ASSERT( o["b"].type() == Timestamp );
- ASSERT( o["b"].timestampValue() == 0 );
- ASSERT( b.type() == Timestamp );
- ASSERT( b.timestampValue() > 0 );
- }
- };
-
- class ValidId : public Base {
- public:
- void run() {
- ASSERT( fixDocumentForInsert( BSON( "_id" << 5 ) ).isOK() );
- ASSERT( fixDocumentForInsert( BSON( "_id" << BSON( "x" << 5 ) ) ).isOK() );
- ASSERT( !fixDocumentForInsert( BSON( "_id" << BSON( "$x" << 5 ) ) ).isOK() );
- ASSERT( !fixDocumentForInsert( BSON( "_id" << BSON( "$oid" << 5 ) ) ).isOK() );
- }
- };
- } // namespace Insert
-
- class All : public Suite {
- public:
- All() : Suite( "pdfile" ) {}
-
- void setupTests() {
- add< Insert::InsertNoId >();
- add< Insert::UpdateDate >();
- add< Insert::UpdateDate2 >();
- add< Insert::ValidId >();
+namespace Insert {
+class Base {
+public:
+ Base() : _scopedXact(&_txn, MODE_X), _lk(_txn.lockState()), _context(&_txn, ns()) {}
+
+ virtual ~Base() {
+ if (!collection())
+ return;
+ WriteUnitOfWork wunit(&_txn);
+ _context.db()->dropCollection(&_txn, ns());
+ wunit.commit();
+ }
+
+protected:
+ const char* ns() {
+ return "unittests.pdfiletests.Insert";
+ }
+ Collection* collection() {
+ return _context.db()->getCollection(ns());
+ }
+
+ OperationContextImpl _txn;
+ ScopedTransaction _scopedXact;
+ Lock::GlobalWrite _lk;
+ Client::Context _context;
+};
+
+class InsertNoId : public Base {
+public:
+ void run() {
+ WriteUnitOfWork wunit(&_txn);
+ BSONObj x = BSON("x" << 1);
+        ASSERT(x["_id"].type() == 0);  // EOO: no _id field yet
+ Collection* collection = _context.db()->getOrCreateCollection(&_txn, ns());
+ StatusWith<RecordId> dl = collection->insertDocument(&_txn, x, true);
+ ASSERT(!dl.isOK());
+
+ StatusWith<BSONObj> fixed = fixDocumentForInsert(x);
+ ASSERT(fixed.isOK());
+ x = fixed.getValue();
+ ASSERT(x["_id"].type() == jstOID);
+ dl = collection->insertDocument(&_txn, x, true);
+ ASSERT(dl.isOK());
+ wunit.commit();
+ }
+};
+
+class UpdateDate : public Base {
+public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendTimestamp("a");
+ b.append("_id", 1);
+ BSONObj o = b.done();
+
+ BSONObj fixed = fixDocumentForInsert(o).getValue();
+ ASSERT_EQUALS(2, fixed.nFields());
+ ASSERT(fixed.firstElement().fieldNameStringData() == "_id");
+ ASSERT(fixed.firstElement().number() == 1);
+
+ BSONElement a = fixed["a"];
+ ASSERT(o["a"].type() == Timestamp);
+ ASSERT(o["a"].timestampValue() == 0);
+ ASSERT(a.type() == Timestamp);
+ ASSERT(a.timestampValue() > 0);
+ }
+};
+
+class UpdateDate2 : public Base {
+public:
+ void run() {
+ BSONObj o;
+ {
+ BSONObjBuilder b;
+ b.appendTimestamp("a");
+ b.appendTimestamp("b");
+ b.append("_id", 1);
+ o = b.obj();
}
- };
-
- SuiteInstance<All> myall;
-
-} // namespace PdfileTests
+ BSONObj fixed = fixDocumentForInsert(o).getValue();
+ ASSERT_EQUALS(3, fixed.nFields());
+ ASSERT(fixed.firstElement().fieldNameStringData() == "_id");
+ ASSERT(fixed.firstElement().number() == 1);
+
+ BSONElement a = fixed["a"];
+ ASSERT(o["a"].type() == Timestamp);
+ ASSERT(o["a"].timestampValue() == 0);
+ ASSERT(a.type() == Timestamp);
+ ASSERT(a.timestampValue() > 0);
+
+ BSONElement b = fixed["b"];
+ ASSERT(o["b"].type() == Timestamp);
+ ASSERT(o["b"].timestampValue() == 0);
+ ASSERT(b.type() == Timestamp);
+ ASSERT(b.timestampValue() > 0);
+ }
+};
+
+class ValidId : public Base {
+public:
+ void run() {
+ ASSERT(fixDocumentForInsert(BSON("_id" << 5)).isOK());
+ ASSERT(fixDocumentForInsert(BSON("_id" << BSON("x" << 5))).isOK());
+ ASSERT(!fixDocumentForInsert(BSON("_id" << BSON("$x" << 5))).isOK());
+ ASSERT(!fixDocumentForInsert(BSON("_id" << BSON("$oid" << 5))).isOK());
+ }
+};
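+
+// A minimal sketch (hypothetical helper, not used by the suite) of the
+// invariant ValidId exercises: fixDocumentForInsert() accepts plain and
+// sub-document _ids, but rejects $-prefixed field names inside an _id
+// sub-document.
+inline void validIdSketch() {
+    verify(fixDocumentForInsert(BSON("_id" << BSON("x" << 5))).isOK());
+    verify(!fixDocumentForInsert(BSON("_id" << BSON("$oid" << 5))).isOK());
+}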
+} // namespace Insert
+
+class All : public Suite {
+public:
+ All() : Suite("pdfile") {}
+
+ void setupTests() {
+ add<Insert::InsertNoId>();
+ add<Insert::UpdateDate>();
+ add<Insert::UpdateDate2>();
+ add<Insert::ValidId>();
+ }
+};
+
+SuiteInstance<All> myall;
+
+} // namespace PdfileTests
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
index b10b5005071..57872498371 100644
--- a/src/mongo/dbtests/perftests.cpp
+++ b/src/mongo/dbtests/perftests.cpp
@@ -73,1454 +73,1691 @@
namespace PerfTests {
- using boost::shared_ptr;
- using std::cout;
- using std::endl;
- using std::fixed;
- using std::ifstream;
- using std::left;
- using std::min;
- using std::right;
- using std::setprecision;
- using std::setw;
- using std::string;
- using std::vector;
-
- const bool profiling = false;
-
- class ClientBase {
- public:
- ClientBase() : _client(&_txn) {
- _prevError = mongo::lastError._get( false );
- mongo::lastError.release();
- mongo::lastError.reset( new LastError() );
- }
- virtual ~ClientBase() {
- mongo::lastError.reset( _prevError );
- }
+using boost::shared_ptr;
+using std::cout;
+using std::endl;
+using std::fixed;
+using std::ifstream;
+using std::left;
+using std::min;
+using std::right;
+using std::setprecision;
+using std::setw;
+using std::string;
+using std::vector;
+
+const bool profiling = false;
+
+class ClientBase {
+public:
+ ClientBase() : _client(&_txn) {
+ _prevError = mongo::lastError._get(false);
+ mongo::lastError.release();
+ mongo::lastError.reset(new LastError());
+ }
+ virtual ~ClientBase() {
+ mongo::lastError.reset(_prevError);
+ }
- protected:
- void insert( const char *ns, BSONObj o ) {
- _client.insert( ns, o );
- }
- void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
- _client.update( ns, Query( q ), o, upsert );
- }
- bool error() {
- return !_client.getPrevError().getField( "err" ).isNull();
- }
+protected:
+ void insert(const char* ns, BSONObj o) {
+ _client.insert(ns, o);
+ }
+    void update(const char* ns, BSONObj q, BSONObj o, bool upsert = false) {
+ _client.update(ns, Query(q), o, upsert);
+ }
+ bool error() {
+ return !_client.getPrevError().getField("err").isNull();
+ }
- DBClientBase* client() { return &_client; }
- OperationContext* txn() { return &_txn; }
+ DBClientBase* client() {
+ return &_client;
+ }
+ OperationContext* txn() {
+ return &_txn;
+ }
- private:
- LastError* _prevError;
- OperationContextImpl _txn;
- DBDirectClient _client;
- };
+private:
+ LastError* _prevError;
+ OperationContextImpl _txn;
+ DBDirectClient _client;
+};
- /* if you want recording of the timings, place the password for the perf database
- in ./../settings.py:
- pstatspassword="<pwd>"
- */
- static boost::shared_ptr<DBClientConnection> conn;
- static string _perfhostname;
- void pstatsConnect() {
- // no writing to perf db if _DEBUG
- DEV return;
-
- const char *fn = "../../settings.py";
- if( !boost::filesystem::exists(fn) ) {
- if( boost::filesystem::exists("settings.py") )
- fn = "settings.py";
- else {
- cout << "no ../../settings.py or ./settings.py file found. will not write perf stats to pstats db." << endl;
- cout << "it is recommended this be enabled even on dev boxes" << endl;
- return;
- }
+/* If you want the timings recorded in the perf database, place its password
+ in ./../settings.py:
+ pstatspassword="<pwd>"
+*/
+static boost::shared_ptr<DBClientConnection> conn;
+static string _perfhostname;
+void pstatsConnect() {
+ // no writing to perf db if _DEBUG
+ DEV return;
+
+ const char* fn = "../../settings.py";
+ if (!boost::filesystem::exists(fn)) {
+ if (boost::filesystem::exists("settings.py"))
+ fn = "settings.py";
+ else {
+ cout << "no ../../settings.py or ./settings.py file found. will not write perf stats "
+ "to pstats db." << endl;
+ cout << "it is recommended this be enabled even on dev boxes" << endl;
+ return;
}
+ }
- try {
- if( conn == 0 ) {
- MemoryMappedFile f;
- const char *p = (const char *) f.mapWithOptions(fn, MongoFile::READONLY);
- string pwd;
+ try {
+ if (conn == 0) {
+ MemoryMappedFile f;
+ const char* p = (const char*)f.mapWithOptions(fn, MongoFile::READONLY);
+ string pwd;
- {
- const char *q = str::after(p, "pstatspassword=\"");
- if( *q == 0 ) {
- cout << "info perftests.cpp: no pstatspassword= in settings.py" << endl;
- return;
- }
- else {
- pwd = str::before(q, '\"');
- }
+ {
+ const char* q = str::after(p, "pstatspassword=\"");
+ if (*q == 0) {
+ cout << "info perftests.cpp: no pstatspassword= in settings.py" << endl;
+ return;
+ } else {
+ pwd = str::before(q, '\"');
}
+ }
- boost::shared_ptr<DBClientConnection> c(new DBClientConnection(false, 60));
- string err;
- if( c->connect(HostAndPort("perfdb.10gen.cc"), err) ) {
- if( !c->auth("perf", "perf", pwd, err) ) {
- cout << "info: authentication with stats db failed: " << err << endl;
- verify(false);
- }
- conn = c;
-
- // override the hostname with the buildbot hostname, if present
- ifstream hostf( "../../info/host" );
- if ( hostf.good() ) {
- char buf[1024];
- hostf.getline(buf, sizeof(buf));
- _perfhostname = buf;
- }
- else {
- _perfhostname = getHostName();
- }
+ boost::shared_ptr<DBClientConnection> c(new DBClientConnection(false, 60));
+ string err;
+ if (c->connect(HostAndPort("perfdb.10gen.cc"), err)) {
+ if (!c->auth("perf", "perf", pwd, err)) {
+ cout << "info: authentication with stats db failed: " << err << endl;
+ verify(false);
}
- else {
- cout << err << " (to log perfstats)" << endl;
+ conn = c;
+
+ // override the hostname with the buildbot hostname, if present
+ ifstream hostf("../../info/host");
+ if (hostf.good()) {
+ char buf[1024];
+ hostf.getline(buf, sizeof(buf));
+ _perfhostname = buf;
+ } else {
+ _perfhostname = getHostName();
}
+ } else {
+ cout << err << " (to log perfstats)" << endl;
}
}
- catch(...) {
- cout << "pstatsConnect() didn't work; ignoring" << endl;
- }
+ } catch (...) {
+ cout << "pstatsConnect() didn't work; ignoring" << endl;
}
+}
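+
+// For reference, the settings.py fragment pstatsConnect() expects is a single
+// assignment (the value here is illustrative):
+//
+//     pstatspassword="example-password"
+//
+// The function scans the mapped file for the literal pstatspassword=" and
+// takes everything up to the next double quote as the password.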
- class B : public ClientBase {
- string _ns;
- protected:
- const char *ns() { return _ns.c_str(); }
+class B : public ClientBase {
+ string _ns;
- // anything you want to do before being timed
- virtual void prep() { }
+protected:
+ const char* ns() {
+ return _ns.c_str();
+ }
- // anything you want to do before threaded test
- virtual void prepThreaded() {}
+ // anything you want to do before being timed
+ virtual void prep() {}
- virtual void timed() = 0;
+ // anything you want to do before threaded test
+ virtual void prepThreaded() {}
- // optional 2nd test phase to be timed separately. You must provide it with a unique
- // name in order for it to run by overloading 'name2'.
- virtual void timed2(DBClientBase*) {}
+ virtual void timed() = 0;
- // return name of second test.
- virtual string name2() { return name(); }
+ // optional 2nd test phase to be timed separately. You must provide it with a unique
+ // name in order for it to run by overloading 'name2'.
+ virtual void timed2(DBClientBase*) {}
- virtual void post() { }
+ // return name of second test.
+ virtual string name2() {
+ return name();
+ }
- virtual string name() = 0;
+ virtual void post() {}
- // how long to run test. 0 is a sentinel which means just run the timed() method once and time it.
- virtual int howLongMillis() { return profiling ? 30000 : 5000; }
+ virtual string name() = 0;
- /* override if your test output doesn't need that */
- virtual bool showDurStats() { return true; }
+    // How long to run the test. Zero is a sentinel meaning: just run the
+    // timed() method once and time it.
+ virtual int howLongMillis() {
+ return profiling ? 30000 : 5000;
+ }
- public:
- virtual unsigned batchSize() { return 50; }
+    /* override if your test's output doesn't need the dur stats */
+ virtual bool showDurStats() {
+ return true;
+ }
- void say(unsigned long long n, long long us, string s) {
- unsigned long long rps = (n*1000*1000)/(us > 0 ? us : 1);
- cout << "stats " << setw(42) << left << s << ' ' << right << setw(9) << rps << ' ' << right << setw(5) << us/1000 << "ms ";
- if (showDurStats()) {
- cout << dur::stats.curr()->_asCSV();
- }
- cout << endl;
-
- if( conn && !conn->isFailed() ) {
- const char *ns = "perf.pstats";
- if(frameworkGlobalParams.perfHist) {
- static bool needver = true;
- try {
- // try to report rps from last time */
- Query q;
- {
- BSONObjBuilder b;
- b.append("host", _perfhostname);
- b.append("test", s);
- b.append("dur", storageGlobalParams.dur);
- DEV { b.append("info.DEBUG",true); }
- else b.appendNull("info.DEBUG");
- if( sizeof(int*) == 4 )
- b.append("info.bits", 32);
- else
- b.appendNull("info.bits");
- q = Query(b.obj()).sort("when",-1);
+public:
+ virtual unsigned batchSize() {
+ return 50;
+ }
+
+ void say(unsigned long long n, long long us, string s) {
+ unsigned long long rps = (n * 1000 * 1000) / (us > 0 ? us : 1);
+ cout << "stats " << setw(42) << left << s << ' ' << right << setw(9) << rps << ' ' << right
+ << setw(5) << us / 1000 << "ms ";
+ if (showDurStats()) {
+ cout << dur::stats.curr()->_asCSV();
+ }
+ cout << endl;
+
+ if (conn && !conn->isFailed()) {
+ const char* ns = "perf.pstats";
+ if (frameworkGlobalParams.perfHist) {
+ static bool needver = true;
+ try {
+                // try to report rps from last time
+ Query q;
+ {
+ BSONObjBuilder b;
+ b.append("host", _perfhostname);
+ b.append("test", s);
+ b.append("dur", storageGlobalParams.dur);
+ DEV {
+ b.append("info.DEBUG", true);
}
- BSONObj fields = BSON( "rps" << 1 << "info" << 1 );
- vector<BSONObj> v;
- conn->findN(v, ns, q, frameworkGlobalParams.perfHist, 0, &fields);
- for( vector<BSONObj>::iterator i = v.begin(); i != v.end(); i++ ) {
- BSONObj o = *i;
- double lastrps = o["rps"].Number();
- if( 0 && lastrps ) {
- cout << "stats " << setw(42) << right << "new/old:" << ' ' << setw(9);
- cout << fixed << setprecision(2) << rps / lastrps;
- if( needver ) {
- cout << " " << o.getFieldDotted("info.git").toString();
- }
- cout << '\n';
+ else b.appendNull("info.DEBUG");
+ if (sizeof(int*) == 4)
+ b.append("info.bits", 32);
+ else
+ b.appendNull("info.bits");
+ q = Query(b.obj()).sort("when", -1);
+ }
+ BSONObj fields = BSON("rps" << 1 << "info" << 1);
+ vector<BSONObj> v;
+ conn->findN(v, ns, q, frameworkGlobalParams.perfHist, 0, &fields);
+ for (vector<BSONObj>::iterator i = v.begin(); i != v.end(); i++) {
+ BSONObj o = *i;
+ double lastrps = o["rps"].Number();
+ if (0 && lastrps) {
+ cout << "stats " << setw(42) << right << "new/old:" << ' ' << setw(9);
+ cout << fixed << setprecision(2) << rps / lastrps;
+ if (needver) {
+ cout << " " << o.getFieldDotted("info.git").toString();
}
+ cout << '\n';
}
- } catch(...) { }
- cout.flush();
- needver = false;
- }
- {
- bob b;
- b.append("host", _perfhostname);
- b.appendTimeT("when", time(0));
- b.append("test", s);
- b.append("rps", (int) rps);
- b.append("millis", us/1000);
- b.appendBool("dur", storageGlobalParams.dur);
- if (showDurStats() && storageGlobalParams.dur) {
- b.append("durStats", dur::stats.asObj());
}
+ } catch (...) {
+ }
+ cout.flush();
+ needver = false;
+ }
+ {
+ bob b;
+ b.append("host", _perfhostname);
+ b.appendTimeT("when", time(0));
+ b.append("test", s);
+ b.append("rps", (int)rps);
+ b.append("millis", us / 1000);
+ b.appendBool("dur", storageGlobalParams.dur);
+ if (showDurStats() && storageGlobalParams.dur) {
+ b.append("durStats", dur::stats.asObj());
+ }
- {
- bob inf;
- inf.append("version", versionString);
- if( sizeof(int*) == 4 ) inf.append("bits", 32);
- DEV inf.append("DEBUG", true);
+ {
+ bob inf;
+ inf.append("version", versionString);
+ if (sizeof(int*) == 4)
+ inf.append("bits", 32);
+ DEV inf.append("DEBUG", true);
#if defined(_WIN32)
- inf.append("os", "win");
+ inf.append("os", "win");
#endif
- inf.append("git", gitVersion());
+ inf.append("git", gitVersion());
#ifdef MONGO_SSL
- inf.append("OpenSSL", openSSLVersion());
+ inf.append("OpenSSL", openSSLVersion());
#endif
- inf.append("boost", BOOST_VERSION);
- b.append("info", inf.obj());
- }
- BSONObj o = b.obj();
- //cout << "inserting " << o.toString() << endl;
- try {
- conn->insert(ns, o);
- }
- catch ( std::exception& e ) {
- warning() << "couldn't save perf results: " << e.what() << endl;
- }
+ inf.append("boost", BOOST_VERSION);
+ b.append("info", inf.obj());
+ }
+ BSONObj o = b.obj();
+ // cout << "inserting " << o.toString() << endl;
+ try {
+ conn->insert(ns, o);
+ } catch (std::exception& e) {
+ warning() << "couldn't save perf results: " << e.what() << endl;
}
}
}
+ }
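+
+    // Worked example of the rps math above: n = 1,000,000 calls completing in
+    // us = 5,000,000 microseconds gives
+    // rps = (1,000,000 * 1,000,000) / 5,000,000 = 200,000 ops/sec,
+    // printed next to us / 1000 = 5000 ms.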
- /** if true runs timed2() again with several threads (8 at time of this writing).
- */
- virtual bool testThreaded() { return false; }
+    /** if true, runs timed2() again with several threads (8 at the time of this writing).
+ */
+ virtual bool testThreaded() {
+ return false;
+ }
- int howLong() {
- int hlm = howLongMillis();
- DEV {
- // don't run very long with _DEBUG - not very meaningful anyway on that build
- hlm = min(hlm, 500);
- }
- return hlm;
+ int howLong() {
+ int hlm = howLongMillis();
+ DEV {
+ // don't run very long with _DEBUG - not very meaningful anyway on that build
+ hlm = min(hlm, 500);
}
+ return hlm;
+ }
- void run() {
+ void run() {
+ unsigned long long n = 0;
- unsigned long long n = 0;
+ _ns = string("perftest.") + name();
+ client()->dropCollection(ns());
+ prep();
+ int hlm = howLong();
+ mongo::Timer t;
+ n = 0;
+ const unsigned int Batch = batchSize();
- _ns = string("perftest.") + name();
- client()->dropCollection(ns());
- prep();
- int hlm = howLong();
- mongo::Timer t;
- n = 0;
- const unsigned int Batch = batchSize();
-
- if( hlm == 0 ) {
- // means just do once
- timed();
- }
- else {
- do {
- unsigned int i;
- for( i = 0; i < Batch; i++ )
- timed();
- n += i;
- } while( t.micros() < (hlm * 1000) );
- }
+ if (hlm == 0) {
+ // means just do once
+ timed();
+ } else {
+ do {
+ unsigned int i;
+ for (i = 0; i < Batch; i++)
+ timed();
+ n += i;
+ } while (t.micros() < (hlm * 1000));
+ }
- client()->getLastError(); // block until all ops are finished
+ client()->getLastError(); // block until all ops are finished
- say(n, t.micros(), name());
+ say(n, t.micros(), name());
- post();
+ post();
- string test2name = name2();
- {
- if( test2name != name() ) {
- dur::stats.curr()->reset();
- mongo::Timer t;
- unsigned long long n = 0;
- while( 1 ) {
- unsigned int i;
- for( i = 0; i < Batch; i++ )
- timed2(client());
- n += i;
- if( t.millis() > hlm )
- break;
- }
- say(n, t.micros(), test2name);
+ string test2name = name2();
+ {
+ if (test2name != name()) {
+ dur::stats.curr()->reset();
+ mongo::Timer t;
+ unsigned long long n = 0;
+ while (1) {
+ unsigned int i;
+ for (i = 0; i < Batch; i++)
+ timed2(client());
+ n += i;
+ if (t.millis() > hlm)
+ break;
}
+ say(n, t.micros(), test2name);
}
+ }
- if( testThreaded() ) {
- const int nThreads = 8;
- //cout << "testThreaded nThreads:" << nThreads << endl;
- mongo::Timer t;
- const unsigned long long result = launchThreads(nThreads);
- say(result/nThreads, t.micros(), test2name+"-threaded");
- }
+ if (testThreaded()) {
+ const int nThreads = 8;
+ // cout << "testThreaded nThreads:" << nThreads << endl;
+ mongo::Timer t;
+ const unsigned long long result = launchThreads(nThreads);
+ say(result / nThreads, t.micros(), test2name + "-threaded");
}
+ }
- bool stop;
+ bool stop;
- void thread(unsigned long long* counter) {
+ void thread(unsigned long long* counter) {
#if defined(_WIN32)
- static int z;
- srand( ++z ^ (unsigned) time(0));
+ static int z;
+ srand(++z ^ (unsigned)time(0));
#endif
- Client::initThreadIfNotAlready("perftestthr");
- OperationContextImpl txn;
- DBDirectClient c(&txn);
-
- const unsigned int Batch = batchSize();
- prepThreaded();
- while( 1 ) {
- unsigned int i = 0;
- for( i = 0; i < Batch; i++ )
- timed2(&c);
- *counter += i;
- if( stop )
- break;
- }
- cc().shutdown();
- }
+ Client::initThreadIfNotAlready("perftestthr");
+ OperationContextImpl txn;
+ DBDirectClient c(&txn);
+
+ const unsigned int Batch = batchSize();
+ prepThreaded();
+ while (1) {
+ unsigned int i = 0;
+ for (i = 0; i < Batch; i++)
+ timed2(&c);
+ *counter += i;
+ if (stop)
+ break;
+ }
+ cc().shutdown();
+ }
- unsigned long long launchThreads(int remaining) {
- stop = false;
- if (!remaining) {
- int hlm = howLong();
- sleepmillis(hlm);
- stop = true;
- return 0;
- }
- unsigned long long counter = 0;
- boost::thread athread(stdx::bind(&B::thread, this, &counter));
- unsigned long long child = launchThreads(remaining - 1);
- athread.join();
- unsigned long long accum = child + counter;
- return accum;
- }
- };
-
- unsigned dontOptimizeOutHopefully = 1;
-
- class NonDurTest : public B {
- public:
- virtual int howLongMillis() { return 3000; }
- virtual bool showDurStats() { return false; }
- };
-
- class BSONIter : public NonDurTest {
- public:
- int n;
- bo b, sub;
- string name() { return "BSONIter"; }
- BSONIter() {
- n = 0;
- bo sub = bob().appendTimeT("t", time(0)).appendBool("abool", true).appendBinData("somebin", 3, BinDataGeneral, "abc").appendNull("anullone").obj();
- b = BSON( "_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false << "obj" << sub << "zzzzzzz" << "a string a string" );
- }
- void timed() {
- for( bo::iterator i = b.begin(); i.more(); )
- if( i.next().fieldName() )
- n++;
- for( bo::iterator i = sub.begin(); i.more(); )
- if( i.next().fieldName() )
- n++;
- }
- };
-
- class BSONGetFields1 : public NonDurTest {
- public:
- int n;
- bo b, sub;
- string name() { return "BSONGetFields1By1"; }
- BSONGetFields1() {
- n = 0;
- bo sub = bob().appendTimeT("t", time(0)).appendBool("abool", true).appendBinData("somebin", 3, BinDataGeneral, "abc").appendNull("anullone").obj();
- b = BSON( "_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false << "obj" << sub << "zzzzzzz" << "a string a string" );
- }
- void timed() {
- if( b["x"].eoo() )
- n++;
- if( b["q"].eoo() )
- n++;
- if( b["zzz"].eoo() )
- n++;
- }
- };
-
- class BSONGetFields2 : public BSONGetFields1 {
- public:
- string name() { return "BSONGetFields"; }
- void timed() {
- static const char *names[] = { "x", "q", "zzz" };
- BSONElement elements[3];
- b.getFields(3, names, elements);
- if( elements[0].eoo() )
- n++;
- if( elements[1].eoo() )
+ unsigned long long launchThreads(int remaining) {
+ stop = false;
+ if (!remaining) {
+ int hlm = howLong();
+ sleepmillis(hlm);
+ stop = true;
+ return 0;
+ }
+ unsigned long long counter = 0;
+ boost::thread athread(stdx::bind(&B::thread, this, &counter));
+ unsigned long long child = launchThreads(remaining - 1);
+ athread.join();
+ unsigned long long accum = child + counter;
+ return accum;
+ }
+};
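+
+// A minimal self-contained sketch of the fan-out/join pattern used by
+// B::launchThreads() above, with std::thread standing in for the
+// boost::thread/stdx::bind wiring (names here are illustrative, not part of
+// the suite): each level spawns one worker, the base case sleeps for the test
+// window and raises the stop flag, and counters are summed while joining on
+// the way back up.
+#include <atomic>
+#include <chrono>
+#include <thread>
+
+static std::atomic<bool> fanOutStop(false);
+
+static unsigned long long fanOutSketch(int remaining, int runMillis) {
+    if (remaining == 0) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(runMillis));
+        fanOutStop = true;  // tell every worker to finish its current batch
+        return 0;
+    }
+    unsigned long long counter = 0;
+    std::thread worker([&counter] {
+        while (!fanOutStop)
+            ++counter;  // stand-in for a timed2() batch
+    });
+    unsigned long long rest = fanOutSketch(remaining - 1, runMillis);
+    worker.join();
+    return rest + counter;
+}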
+
+unsigned dontOptimizeOutHopefully = 1;
+
+class NonDurTest : public B {
+public:
+ virtual int howLongMillis() {
+ return 3000;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+};
+
+class BSONIter : public NonDurTest {
+public:
+ int n;
+ bo b, sub;
+ string name() {
+ return "BSONIter";
+ }
+ BSONIter() {
+ n = 0;
+ bo sub = bob()
+ .appendTimeT("t", time(0))
+ .appendBool("abool", true)
+ .appendBinData("somebin", 3, BinDataGeneral, "abc")
+ .appendNull("anullone")
+ .obj();
+ b = BSON("_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false
+ << "obj" << sub << "zzzzzzz"
+ << "a string a string");
+ }
+ void timed() {
+ for (bo::iterator i = b.begin(); i.more();)
+ if (i.next().fieldName())
n++;
- if( elements[2].eoo() )
+ for (bo::iterator i = sub.begin(); i.more();)
+ if (i.next().fieldName())
n++;
- }
- };
-
- class KeyTest : public B {
- public:
- KeyV1Owned a,b,c;
- string name() { return "Key-woequal"; }
- virtual int howLongMillis() { return 3000; }
- KeyTest() :
- a(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqq")),
- b(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqq")),
- c(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqqb"))
- {}
- virtual bool showDurStats() { return false; }
- void timed() {
- verify( a.woEqual(b) );
- verify( !a.woEqual(c) );
- }
- };
+ }
+};
+
+class BSONGetFields1 : public NonDurTest {
+public:
+ int n;
+ bo b, sub;
+ string name() {
+ return "BSONGetFields1By1";
+ }
+ BSONGetFields1() {
+ n = 0;
+ bo sub = bob()
+ .appendTimeT("t", time(0))
+ .appendBool("abool", true)
+ .appendBinData("somebin", 3, BinDataGeneral, "abc")
+ .appendNull("anullone")
+ .obj();
+ b = BSON("_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false
+ << "obj" << sub << "zzzzzzz"
+ << "a string a string");
+ }
+ void timed() {
+ if (b["x"].eoo())
+ n++;
+ if (b["q"].eoo())
+ n++;
+ if (b["zzz"].eoo())
+ n++;
+ }
+};
- unsigned long long aaa;
+class BSONGetFields2 : public BSONGetFields1 {
+public:
+ string name() {
+ return "BSONGetFields";
+ }
+ void timed() {
+ static const char* names[] = {"x", "q", "zzz"};
+ BSONElement elements[3];
+ b.getFields(3, names, elements);
+ if (elements[0].eoo())
+ n++;
+ if (elements[1].eoo())
+ n++;
+ if (elements[2].eoo())
+ n++;
+ }
+};
- class Timer : public B {
- public:
- string name() { return "Timer"; }
- virtual int howLongMillis() { return 1000; }
- virtual bool showDurStats() { return false; }
- void timed() {
- mongo::Timer t;
- aaa += t.millis();
- }
- };
-
- class Sleep0Ms : public B {
- public:
- string name() { return "Sleep0Ms"; }
- virtual int howLongMillis() { return 400; }
- virtual bool showDurStats() { return false; }
- void timed() {
- sleepmillis(0);
- aaa++;
- }
- };
+class KeyTest : public B {
+public:
+ KeyV1Owned a, b, c;
+ string name() {
+ return "Key-woequal";
+ }
+ virtual int howLongMillis() {
+ return 3000;
+ }
+ KeyTest()
+ : a(BSON("a" << 1 << "b" << 3.0 << "c"
+ << "qqq")),
+ b(BSON("a" << 1 << "b" << 3.0 << "c"
+ << "qqq")),
+ c(BSON("a" << 1 << "b" << 3.0 << "c"
+ << "qqqb")) {}
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ verify(a.woEqual(b));
+ verify(!a.woEqual(c));
+ }
+};
+
+unsigned long long aaa;
+
+class Timer : public B {
+public:
+ string name() {
+ return "Timer";
+ }
+ virtual int howLongMillis() {
+ return 1000;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ mongo::Timer t;
+ aaa += t.millis();
+ }
+};
+
+class Sleep0Ms : public B {
+public:
+ string name() {
+ return "Sleep0Ms";
+ }
+ virtual int howLongMillis() {
+ return 400;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ sleepmillis(0);
+ aaa++;
+ }
+};
#if defined(__USE_XOPEN2K)
- class Yield : public B {
- public:
- string name() { return "Yield"; }
- virtual int howLongMillis() { return 400; }
- virtual bool showDurStats() { return false; }
- void timed() {
- pthread_yield();
- aaa++;
- }
- };
+class Yield : public B {
+public:
+ string name() {
+ return "Yield";
+ }
+ virtual int howLongMillis() {
+ return 400;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ pthread_yield();
+ aaa++;
+ }
+};
#endif
- RWLock lk("testrw");
- SimpleMutex m("simptst");
- mongo::mutex mtest("mtest");
- boost::mutex mboost;
- boost::timed_mutex mboost_timed;
+RWLock lk("testrw");
+SimpleMutex m("simptst");
+mongo::mutex mtest("mtest");
+boost::mutex mboost;
+boost::timed_mutex mboost_timed;
#if (__cplusplus >= 201103L)
- std::mutex mstd;
- std::timed_mutex mstd_timed;
+std::mutex mstd;
+std::timed_mutex mstd_timed;
#endif
- SpinLock s;
- boost::condition c;
-
- class NotifyOne : public B {
- public:
- string name() { return "notify_one"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- c.notify_one();
- }
- };
- class mutexspeed : public B {
- public:
- string name() { return "mutex"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- mongo::mutex::scoped_lock lk(mtest);
- }
- };
- class boostmutexspeed : public B {
- public:
- string name() { return "boost::mutex"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- boost::mutex::scoped_lock lk(mboost);
- }
- };
- class boosttimed_mutexspeed : public B {
- public:
- string name() { return "boost::timed_mutex"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- boost::timed_mutex::scoped_lock lk(mboost_timed);
- }
- };
- class simplemutexspeed : public B {
- public:
- string name() { return "simplemutex"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- SimpleMutex::scoped_lock lk(m);
- }
- };
+SpinLock s;
+boost::condition c;
+
+class NotifyOne : public B {
+public:
+ string name() {
+ return "notify_one";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ c.notify_one();
+ }
+};
+class mutexspeed : public B {
+public:
+ string name() {
+ return "mutex";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ mongo::mutex::scoped_lock lk(mtest);
+ }
+};
+class boostmutexspeed : public B {
+public:
+ string name() {
+ return "boost::mutex";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ boost::mutex::scoped_lock lk(mboost);
+ }
+};
+class boosttimed_mutexspeed : public B {
+public:
+ string name() {
+ return "boost::timed_mutex";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ boost::timed_mutex::scoped_lock lk(mboost_timed);
+ }
+};
+class simplemutexspeed : public B {
+public:
+ string name() {
+ return "simplemutex";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ SimpleMutex::scoped_lock lk(m);
+ }
+};
#if (__cplusplus >= 201103L)
- class stdmutexspeed : public B {
- public:
- string name() { return "std::mutex"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- std::lock_guard<std::mutex> lk(mstd);
- }
- };
- class stdtimed_mutexspeed : public B {
- public:
- string name() { return "std::timed_mutex"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- std::lock_guard<std::timed_mutex> lk(mstd_timed);
- }
- };
+class stdmutexspeed : public B {
+public:
+ string name() {
+ return "std::mutex";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ std::lock_guard<std::mutex> lk(mstd);
+ }
+};
+class stdtimed_mutexspeed : public B {
+public:
+ string name() {
+ return "std::timed_mutex";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ std::lock_guard<std::timed_mutex> lk(mstd_timed);
+ }
+};
#endif
- class spinlockspeed : public B {
- public:
- string name() { return "spinlock"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- mongo::scoped_spinlock lk(s);
- }
- };
- int cas;
- class casspeed : public B {
- public:
- string name() { return "compareandswap"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
+class spinlockspeed : public B {
+public:
+ string name() {
+ return "spinlock";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ mongo::scoped_spinlock lk(s);
+ }
+};
+int cas;
+class casspeed : public B {
+public:
+ string name() {
+ return "compareandswap";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
#define RUNCOMPARESWAP 1
- __sync_bool_compare_and_swap(&cas, 0, 0);
+ __sync_bool_compare_and_swap(&cas, 0, 0);
#endif
- }
- };
- class rlock : public B {
- public:
- string name() { return "rlock"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- lk.lock_shared();
- lk.unlock_shared();
- }
- };
- class wlock : public B {
- public:
- string name() { return "wlock"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- lk.lock();
- lk.unlock();
- }
- };
-
- class locker_test : public B {
- public:
- boost::thread_specific_ptr<ResourceId> resId;
- boost::thread_specific_ptr<MMAPV1LockerImpl> locker;
- boost::thread_specific_ptr<int> id;
- boost::mutex lock;
-
- // The following members are intitialized in the constructor
- LockMode lockMode;
- LockMode glockMode;
-
- locker_test(LockMode m = MODE_X, LockMode gm = MODE_IX)
- : lockMode(m),
- glockMode(gm) { }
- virtual string name() {
- return (str::stream() << "locker_contested" << lockMode);
- }
- virtual bool showDurStats() { return false; }
- virtual bool testThreaded() { return true; }
- virtual void prep() {
- resId.reset(new ResourceId(RESOURCE_COLLECTION, std::string("TestDB.collection")));
- locker.reset(new MMAPV1LockerImpl());
- }
+ }
+};
+class rlock : public B {
+public:
+ string name() {
+ return "rlock";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ lk.lock_shared();
+ lk.unlock_shared();
+ }
+};
+class wlock : public B {
+public:
+ string name() {
+ return "wlock";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ lk.lock();
+ lk.unlock();
+ }
+};
+
+class locker_test : public B {
+public:
+ boost::thread_specific_ptr<ResourceId> resId;
+ boost::thread_specific_ptr<MMAPV1LockerImpl> locker;
+ boost::thread_specific_ptr<int> id;
+ boost::mutex lock;
+
+    // The following members are initialized in the constructor
+ LockMode lockMode;
+ LockMode glockMode;
+
+ locker_test(LockMode m = MODE_X, LockMode gm = MODE_IX) : lockMode(m), glockMode(gm) {}
+ virtual string name() {
+ return (str::stream() << "locker_contested" << lockMode);
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ virtual bool testThreaded() {
+ return true;
+ }
+ virtual void prep() {
+ resId.reset(new ResourceId(RESOURCE_COLLECTION, std::string("TestDB.collection")));
+ locker.reset(new MMAPV1LockerImpl());
+ }
- virtual void prepThreaded() {
- resId.reset(new ResourceId(RESOURCE_COLLECTION, std::string("TestDB.collection")));
- id.reset(new int);
- lock.lock();
- lock.unlock();
- locker.reset(new MMAPV1LockerImpl());
- }
+ virtual void prepThreaded() {
+ resId.reset(new ResourceId(RESOURCE_COLLECTION, std::string("TestDB.collection")));
+ id.reset(new int);
+ lock.lock();
+ lock.unlock();
+ locker.reset(new MMAPV1LockerImpl());
+ }
- void timed() {
- locker->lockGlobal(glockMode);
- locker->lock(*resId, lockMode);
- locker->unlockAll();
- }
+ void timed() {
+ locker->lockGlobal(glockMode);
+ locker->lock(*resId, lockMode);
+ locker->unlockAll();
+ }
- void timed2(DBClientBase* c) {
- locker->lockGlobal(glockMode);
- locker->lock(*resId, lockMode);
- locker->unlockAll();
- }
- };
+ void timed2(DBClientBase* c) {
+ locker->lockGlobal(glockMode);
+ locker->lock(*resId, lockMode);
+ locker->unlockAll();
+ }
+};
- class glockerIX : public locker_test {
- public:
- virtual string name() {
- return (str::stream() << "glocker" << glockMode);
- }
+class glockerIX : public locker_test {
+public:
+ virtual string name() {
+ return (str::stream() << "glocker" << glockMode);
+ }
- void timed() {
- locker->lockGlobal(glockMode);
- locker->unlockAll();
- }
+ void timed() {
+ locker->lockGlobal(glockMode);
+ locker->unlockAll();
+ }
- void timed2(DBClientBase* c) {
- locker->lockGlobal(glockMode);
- locker->unlockAll();
- }
- };
-
- class locker_test_uncontested : public locker_test {
- public:
- locker_test_uncontested(LockMode m = MODE_IX, LockMode gm = MODE_IX)
- : locker_test(m, gm) { }
- virtual string name() {
- return (str::stream() << "locker_uncontested" << lockMode);
- }
+ void timed2(DBClientBase* c) {
+ locker->lockGlobal(glockMode);
+ locker->unlockAll();
+ }
+};
- virtual void prepThreaded() {
- id.reset(new int);
+class locker_test_uncontested : public locker_test {
+public:
+ locker_test_uncontested(LockMode m = MODE_IX, LockMode gm = MODE_IX) : locker_test(m, gm) {}
+ virtual string name() {
+ return (str::stream() << "locker_uncontested" << lockMode);
+ }
- lock.lock();
- lock.unlock();
- locker.reset(new LockerImpl<true>);
- resId.reset(new ResourceId(RESOURCE_COLLECTION,
- str::stream() << "TestDB.collection" << *id));
- }
- };
-
-
- class glockerIS : public glockerIX {
- public:
- glockerIS() : glockerIX() { glockMode = MODE_IS; }
- };
-
- class locker_contestedX : public locker_test {
- public:
- locker_contestedX() : locker_test(MODE_X, MODE_IX) { }
- };
-
- class locker_contestedS : public locker_test {
- public:
- locker_contestedS() : locker_test(MODE_S, MODE_IS) { }
- };
-
- class locker_uncontestedX : public locker_test_uncontested {
- public:
- locker_uncontestedX() : locker_test_uncontested(MODE_X, MODE_IX) { }
- };
-
- class locker_uncontestedS : public locker_test_uncontested {
- public:
- locker_uncontestedS() : locker_test_uncontested(MODE_S, MODE_IS) { }
- };
-
- class CTM : public B {
- public:
- CTM() : last(0), delts(0), n(0) { }
- string name() { return "curTimeMillis64"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- unsigned long long last;
- unsigned long long delts;
- unsigned n;
- void timed() {
- unsigned long long x = curTimeMillis64();
- aaa += x;
- if( last ) {
- unsigned long long delt = x-last;
- if( delt ) {
- delts += delt;
- n++;
- }
+ virtual void prepThreaded() {
+ id.reset(new int);
+
+ lock.lock();
+ lock.unlock();
+ locker.reset(new LockerImpl<true>);
+ resId.reset(
+ new ResourceId(RESOURCE_COLLECTION, str::stream() << "TestDB.collection" << *id));
+ }
+};
+
+
+class glockerIS : public glockerIX {
+public:
+ glockerIS() : glockerIX() {
+ glockMode = MODE_IS;
+ }
+};
+
+class locker_contestedX : public locker_test {
+public:
+ locker_contestedX() : locker_test(MODE_X, MODE_IX) {}
+};
+
+class locker_contestedS : public locker_test {
+public:
+ locker_contestedS() : locker_test(MODE_S, MODE_IS) {}
+};
+
+class locker_uncontestedX : public locker_test_uncontested {
+public:
+ locker_uncontestedX() : locker_test_uncontested(MODE_X, MODE_IX) {}
+};
+
+class locker_uncontestedS : public locker_test_uncontested {
+public:
+ locker_uncontestedS() : locker_test_uncontested(MODE_S, MODE_IS) {}
+};
+
+class CTM : public B {
+public:
+ CTM() : last(0), delts(0), n(0) {}
+ string name() {
+ return "curTimeMillis64";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ unsigned long long last;
+ unsigned long long delts;
+ unsigned n;
+ void timed() {
+ unsigned long long x = curTimeMillis64();
+ aaa += x;
+ if (last) {
+ unsigned long long delt = x - last;
+ if (delt) {
+ delts += delt;
+ n++;
}
- last = x;
- }
- void post() {
- // we need to know if timing is highly ungranular - that could be relevant in some places
- if( n )
- cout << " avg timer granularity: " << ((double)delts)/n << "ms " << endl;
}
- };
- class CTMicros : public B {
- public:
- CTMicros() : last(0), delts(0), n(0) { }
- string name() { return "curTimeMicros64"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- unsigned long long last;
- unsigned long long delts;
- unsigned n;
- void timed() {
- unsigned long long x = curTimeMicros64();
- aaa += x;
- if( last ) {
- unsigned long long delt = x-last;
- if( delt ) {
- delts += delt;
- n++;
- }
+ last = x;
+ }
+ void post() {
+        // we need to know if the timer granularity is coarse - that could be relevant in some places
+ if (n)
+ cout << " avg timer granularity: " << ((double)delts) / n << "ms " << endl;
+ }
+};
+class CTMicros : public B {
+public:
+ CTMicros() : last(0), delts(0), n(0) {}
+ string name() {
+ return "curTimeMicros64";
+ }
+ virtual int howLongMillis() {
+ return 500;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ unsigned long long last;
+ unsigned long long delts;
+ unsigned n;
+ void timed() {
+ unsigned long long x = curTimeMicros64();
+ aaa += x;
+ if (last) {
+ unsigned long long delt = x - last;
+ if (delt) {
+ delts += delt;
+ n++;
}
- last = x;
- }
- void post() {
- // we need to know if timing is highly ungranular - that could be relevant in some places
- if( n )
- cout << " avg timer granularity: " << ((double)delts)/n << "ms " << endl;
}
- };
+ last = x;
+ }
+ void post() {
+        // we need to know if the timer granularity is coarse - that could be relevant in some places
+ if (n)
+ cout << " avg timer granularity: " << ((double)delts) / n << "ms " << endl;
+ }
+};
- class Bldr : public B {
- public:
- int n;
- string name() { return "BufBuilder"; }
- Bldr() {
- }
- virtual int howLongMillis() { return 3000; }
- virtual bool showDurStats() { return false; }
- void timed() {
- BufBuilder b;
- b.appendNum(3);
- b.appendUChar(' ');
- b.appendStr("abcd");
- n += b.len();
- }
- };
-
- class StkBldr : public B {
- public:
- virtual int howLongMillis() { return 3000; }
- int n;
- string name() { return "StackBufBuilder"; }
- virtual bool showDurStats() { return false; }
- void timed() {
- StackBufBuilder b;
- b.appendNum(3);
- b.appendUChar(' ');
- b.appendStr("abcd");
- n += b.len();
- }
- };
-
- // if a test is this fast, it was optimized out
- class Dummy : public B {
- public:
- Dummy() { }
- virtual int howLongMillis() { return 3000; }
- string name() { return "dummy"; }
- void timed() {
- dontOptimizeOutHopefully++;
- }
- virtual bool showDurStats() { return false; }
- };
+class Bldr : public B {
+public:
+ int n;
+ string name() {
+ return "BufBuilder";
+ }
+ Bldr() {}
+ virtual int howLongMillis() {
+ return 3000;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ BufBuilder b;
+ b.appendNum(3);
+ b.appendUChar(' ');
+ b.appendStr("abcd");
+ n += b.len();
+ }
+};
+
+class StkBldr : public B {
+public:
+ virtual int howLongMillis() {
+ return 3000;
+ }
+ int n;
+ string name() {
+ return "StackBufBuilder";
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ void timed() {
+ StackBufBuilder b;
+ b.appendNum(3);
+ b.appendUChar(' ');
+ b.appendStr("abcd");
+ n += b.len();
+ }
+};
+
+// if a test is this fast, it was optimized out
+class Dummy : public B {
+public:
+ Dummy() {}
+ virtual int howLongMillis() {
+ return 3000;
+ }
+ string name() {
+ return "dummy";
+ }
+ void timed() {
+ dontOptimizeOutHopefully++;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+};
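+
+// Dummy both reads and writes the global dontOptimizeOutHopefully, which is
+// what (hopefully) keeps the compiler from eliding timed(); its rate serves
+// as the harness-overhead baseline the comment above alludes to.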
- // test thread local speed
+// test thread local speed
#if defined(_WIN32)
- __declspec( thread ) int x;
- class TLS2 : public B {
- public:
- virtual int howLongMillis() { return 3000; }
- string name() { return "thread-local-storage2"; }
- void timed() {
- if( x )
- dontOptimizeOutHopefully++;
- }
- virtual bool showDurStats() { return false; }
- };
+__declspec(thread) int x;
+class TLS2 : public B {
+public:
+ virtual int howLongMillis() {
+ return 3000;
+ }
+ string name() {
+ return "thread-local-storage2";
+ }
+ void timed() {
+ if (x)
+ dontOptimizeOutHopefully++;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+};
#endif
- // test thread local speed
- class TLS : public B {
- public:
- virtual int howLongMillis() { return 3000; }
- string name() { return "thread-local-storage"; }
- void timed() {
- dontOptimizeOutHopefully++;
- }
- virtual bool showDurStats() { return false; }
- };
-
- bool dummy1 = false;
-
- class TestException : public DBException {
- public:
- TestException() : DBException("testexception",3) { }
- };
- struct Z {
- Z() { dontOptimizeOutHopefully--; }
- ~Z() { dontOptimizeOutHopefully++; }
- };
- void thr1(int n) {
- if( dontOptimizeOutHopefully ) {
+// test thread local speed
+class TLS : public B {
+public:
+ virtual int howLongMillis() {
+ return 3000;
+ }
+ string name() {
+ return "thread-local-storage";
+ }
+ void timed() {
+ dontOptimizeOutHopefully++;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+};
+
+bool dummy1 = false;
+
+class TestException : public DBException {
+public:
+ TestException() : DBException("testexception", 3) {}
+};
+struct Z {
+ Z() {
+ dontOptimizeOutHopefully--;
+ }
+ ~Z() {
+ dontOptimizeOutHopefully++;
+ }
+};
+void thr1(int n) {
+ if (dontOptimizeOutHopefully) {
+ throw TestException();
+ }
+ mongo::unittest::log() << "hmmm" << endl;
+}
+void thr2(int n) {
+ if (--n <= 0) {
+ if (dontOptimizeOutHopefully) {
throw TestException();
}
mongo::unittest::log() << "hmmm" << endl;
}
- void thr2(int n) {
- if( --n <= 0 ) {
- if( dontOptimizeOutHopefully ) {
- throw TestException();
- }
- mongo::unittest::log() << "hmmm" << endl;
- }
- Z z;
- try {
- thr2(n-1);
- }
- catch(DBException&) {
- }
+ Z z;
+ try {
+ thr2(n - 1);
+ } catch (DBException&) {
}
- void thr3(int n) {
- if( --n <= 0 ) {
- if( dontOptimizeOutHopefully ) {
- throw TestException();
- }
- mongo::unittest::log() << "hmmm" << endl;
- }
- try {
- Z z;
- thr3(n-1);
- }
- catch(DBException&) {
+}
+void thr3(int n) {
+ if (--n <= 0) {
+ if (dontOptimizeOutHopefully) {
+ throw TestException();
}
+ mongo::unittest::log() << "hmmm" << endl;
}
- void thr4(int n) {
- if( --n <= 0 ) {
- if( dontOptimizeOutHopefully ) {
- throw TestException();
- }
- mongo::unittest::log() << "hmmm" << endl;
- }
+ try {
Z z;
- thr4(n-1);
- }
- template< void T (int) >
- class Throw : public B {
- public:
- virtual int howLongMillis() { return 2000; }
- string name() { return "throw"; }
- void timed() {
- try {
- T(10);
- dontOptimizeOutHopefully += 2;
- }
- catch(DBException& e) {
- e.getCode();
- dontOptimizeOutHopefully++;
- }
- }
- virtual bool showDurStats() { return false; }
- };
-
- class New128 : public B {
- public:
- virtual int howLongMillis() { return 2000; }
- string name() { return "new128"; }
- void timed() {
- char *p = new char[128];
- if( dontOptimizeOutHopefully++ > 0 )
- delete[] p;
- }
- virtual bool showDurStats() { return false; }
- };
-
- class New8 : public B {
- public:
- virtual int howLongMillis() { return 2000; }
- string name() { return "new8"; }
- void timed() {
- char *p = new char[8];
- if( dontOptimizeOutHopefully++ > 0 )
- delete[] p;
- }
- virtual bool showDurStats() { return false; }
- };
-
- class Compress : public B {
- public:
- const unsigned sz;
- void *p;
- Compress() : sz(1024*1024*100+3) { }
- virtual unsigned batchSize() { return 1; }
- string name() { return "compress"; }
- virtual bool showDurStats() { return false; }
- virtual int howLongMillis() { return 4000; }
- void prep() {
- p = mongoMalloc(sz);
- // this isn't a fair test as it is mostly rands but we just want a rough perf check
- static int last;
- for (unsigned i = 0; i<sz; i++) {
- int r = rand();
- if( (r & 0x300) == 0x300 )
- r = last;
- ((char*)p)[i] = r;
- last = r;
- }
- }
- size_t last;
- string res;
- void timed() {
- mongo::Timer t;
- string out;
- size_t len = compress((const char *) p, sz, &out);
- bool ok = uncompress(out.c_str(), out.size(), &res);
- ASSERT(ok);
- static unsigned once;
- if( once++ == 0 )
- cout << "compress round trip " << sz/(1024.0*1024) / (t.millis()/1000.0) << "MB/sec\n";
- //cout << len / (1024.0/1024) << " compressed" << endl;
- (void)len; //fix unused error while above line is commented out
+ thr3(n - 1);
+ } catch (DBException&) {
+ }
+}
+void thr4(int n) {
+ if (--n <= 0) {
+ if (dontOptimizeOutHopefully) {
+ throw TestException();
}
- void post() {
- ASSERT( memcmp(res.c_str(), p, sz) == 0 );
- free(p);
+ mongo::unittest::log() << "hmmm" << endl;
+ }
+ Z z;
+ thr4(n - 1);
+}
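+
+// Summary of the variants timed by Throw<T> below: thr1 throws immediately;
+// thr2 recurses with a Z on the stack and catches at every level; thr3 also
+// catches per level but scopes its Z inside the try; thr4 never catches, so
+// a single unwind at the top destroys every Z at once.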
+template <void T(int)>
+class Throw : public B {
+public:
+ virtual int howLongMillis() {
+ return 2000;
+ }
+ string name() {
+ return "throw";
+ }
+ void timed() {
+ try {
+ T(10);
+ dontOptimizeOutHopefully += 2;
+ } catch (DBException& e) {
+ e.getCode();
+ dontOptimizeOutHopefully++;
}
- };
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+};
- // test speed of checksum method
- class ChecksumTest : public B {
- public:
- const unsigned sz;
- ChecksumTest() : sz(1024*1024*100+3) { }
- string name() { return "checksum"; }
- virtual int howLongMillis() { return 2000; }
- virtual bool showDurStats() { return false; }
- virtual unsigned batchSize() { return 1; }
+class New128 : public B {
+public:
+ virtual int howLongMillis() {
+ return 2000;
+ }
+ string name() {
+ return "new128";
+ }
+ void timed() {
+ char* p = new char[128];
+ if (dontOptimizeOutHopefully++ > 0)
+ delete[] p;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+};
- void *p;
+class New8 : public B {
+public:
+ virtual int howLongMillis() {
+ return 2000;
+ }
+ string name() {
+ return "new8";
+ }
+ void timed() {
+ char* p = new char[8];
+ if (dontOptimizeOutHopefully++ > 0)
+ delete[] p;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+};
+
+class Compress : public B {
+public:
+ const unsigned sz;
+ void* p;
+ Compress() : sz(1024 * 1024 * 100 + 3) {}
+ virtual unsigned batchSize() {
+ return 1;
+ }
+ string name() {
+ return "compress";
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ virtual int howLongMillis() {
+ return 4000;
+ }
+ void prep() {
+ p = mongoMalloc(sz);
+        // this isn't a fair test since the data is mostly random, but we just want a rough perf check
+ static int last;
+ for (unsigned i = 0; i < sz; i++) {
+ int r = rand();
+ if ((r & 0x300) == 0x300)
+ r = last;
+ ((char*)p)[i] = r;
+ last = r;
+ }
+ }
+ size_t last;
+ string res;
+ void timed() {
+ mongo::Timer t;
+ string out;
+ size_t len = compress((const char*)p, sz, &out);
+ bool ok = uncompress(out.c_str(), out.size(), &res);
+ ASSERT(ok);
+ static unsigned once;
+ if (once++ == 0)
+ cout << "compress round trip " << sz / (1024.0 * 1024) / (t.millis() / 1000.0)
+ << "MB/sec\n";
+        // cout << len / (1024.0 * 1024) << " MB compressed" << endl;
+ (void)len; // fix unused error while above line is commented out
+ }
+ void post() {
+ ASSERT(memcmp(res.c_str(), p, sz) == 0);
+ free(p);
+ }
+};
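+
+// Worked example of the throughput line above: sz is 100 * 1024 * 1024 + 3
+// bytes (~100 MB), so a round trip taking t.millis() == 500 prints
+// 100 / 0.5 = 200 MB/sec.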
+
+// test speed of checksum method
+class ChecksumTest : public B {
+public:
+ const unsigned sz;
+ ChecksumTest() : sz(1024 * 1024 * 100 + 3) {}
+ string name() {
+ return "checksum";
+ }
+ virtual int howLongMillis() {
+ return 2000;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
+ virtual unsigned batchSize() {
+ return 1;
+ }
- void prep() {
- {
- // the checksum code assumes 'standard' rollover on addition overflows. let's check that:
- unsigned long long x = 0xffffffffffffffffULL;
- ASSERT( x+2 == 1 );
- }
+ void* p;
- p = malloc(sz);
- for (unsigned i = 0; i<sz; i++)
- ((char*)p)[i] = rand();
+ void prep() {
+ {
+ // the checksum code assumes 'standard' rollover on addition overflows. let's check that:
+ unsigned long long x = 0xffffffffffffffffULL;
+ ASSERT(x + 2 == 1);
}
- Checksum last;
+ p = malloc(sz);
+ for (unsigned i = 0; i < sz; i++)
+ ((char*)p)[i] = rand();
+ }
- void timed() {
- static int i;
- Checksum c;
- c.gen(p, sz);
- if( i == 0 )
- last = c;
- else if( i == 1 ) {
- ASSERT( c == last );
- }
- }
- void post() {
- {
- mongo::Checksum c;
- c.gen(p, sz-1);
- ASSERT( c != last );
- ((char *&)p)[0]++; // check same data, different order, doesn't give same checksum
- ((char *&)p)[1]--;
- c.gen(p, sz);
- ASSERT( c != last );
- ((char *&)p)[1]++; // check same data, different order, doesn't give same checksum (different longwords case)
- ((char *&)p)[8]--;
- c.gen(p, sz);
- ASSERT( c != last );
- }
- free(p);
- }
- };
-
- class InsertDup : public B {
- const BSONObj o;
- public:
- InsertDup() : o( BSON("_id" << 1) ) { } // dup keys
- string name() {
- return "insert-duplicate-_ids";
- }
- void prep() {
- client()->insert( ns(), o );
- }
- void timed() {
- client()->insert( ns(), o );
- }
- void post() {
- verify( client()->count(ns()) == 1 );
- }
- };
-
- class Insert1 : public B {
- const BSONObj x;
- OID oid;
- BSONObj query;
- public:
- virtual int howLongMillis() { return profiling ? 30000 : 5000; }
- Insert1() : x( BSON("x" << 99) ) {
- oid.init();
- query = BSON("_id" << oid);
- i = 0;
- }
- string name() { return "insert-simple"; }
- unsigned i;
- void timed() {
- BSONObj o = BSON( "_id" << i++ << "x" << 99 );
- client()->insert( ns(), o );
- }
- virtual bool testThreaded() {
- if( profiling )
- return false;
- return true;
+ Checksum last;
+
+ void timed() {
+ static int i;
+ Checksum c;
+ c.gen(p, sz);
+ if (i == 0)
+ last = c;
+ else if (i == 1) {
+ ASSERT(c == last);
}
- string name2() {
- return "findOne_by_id";
+ }
+ void post() {
+ {
+ mongo::Checksum c;
+ c.gen(p, sz - 1);
+ ASSERT(c != last);
+ ((char*&)p)[0]++; // check same data, different order, doesn't give same checksum
+ ((char*&)p)[1]--;
+ c.gen(p, sz);
+ ASSERT(c != last);
+            // check same data, different order, doesn't give same checksum
+            // (different longwords case)
+            ((char*&)p)[1]++;
+ ((char*&)p)[8]--;
+ c.gen(p, sz);
+ ASSERT(c != last);
}
+ free(p);
+ }
+};
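+
+// The rollover check in prep(), spelled out: unsigned long long arithmetic is
+// defined modulo 2^64, so 0xffffffffffffffffULL + 2 == (2^64 - 1 + 2) mod 2^64
+// == 1, which is exactly the wraparound the checksum code relies on.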
- void timed2(DBClientBase* c) {
- Query q = QUERY( "_id" << (unsigned) (rand() % i) );
- c->findOne(ns(), q);
- }
- void post() {
+class InsertDup : public B {
+ const BSONObj o;
+
+public:
+ InsertDup() : o(BSON("_id" << 1)) {} // dup keys
+ string name() {
+ return "insert-duplicate-_ids";
+ }
+ void prep() {
+ client()->insert(ns(), o);
+ }
+ void timed() {
+ client()->insert(ns(), o);
+ }
+ void post() {
+ verify(client()->count(ns()) == 1);
+ }
+};
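+
+// After prep() seeds {_id: 1}, every timed() insert collides on _id, so this
+// benchmark measures the duplicate-key error path; post() confirms the
+// collection still holds exactly one document.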
+
+class Insert1 : public B {
+ const BSONObj x;
+ OID oid;
+ BSONObj query;
+
+public:
+ virtual int howLongMillis() {
+ return profiling ? 30000 : 5000;
+ }
+ Insert1() : x(BSON("x" << 99)) {
+ oid.init();
+ query = BSON("_id" << oid);
+ i = 0;
+ }
+ string name() {
+ return "insert-simple";
+ }
+ unsigned i;
+ void timed() {
+ BSONObj o = BSON("_id" << i++ << "x" << 99);
+ client()->insert(ns(), o);
+ }
+ virtual bool testThreaded() {
+ if (profiling)
+ return false;
+ return true;
+ }
+ string name2() {
+ return "findOne_by_id";
+ }
+
+ void timed2(DBClientBase* c) {
+ Query q = QUERY("_id" << (unsigned)(rand() % i));
+ c->findOne(ns(), q);
+ }
+ void post() {
#if !defined(_DEBUG)
- verify( client()->count(ns()) > 50 );
+ verify(client()->count(ns()) > 50);
#endif
- }
- };
-
- class InsertBig : public B {
- BSONObj x;
- virtual int howLongMillis() {
- if (sizeof(void*) == 4) {
- // See SERVER-12556 - Running this test for some time causes occasional failures
- // on Windows 32-bit, because the virtual address space is used up and remapping
- // starts to fail. Value of zero means that only one iteration of the test
- // will run.
- //
- return 0;
- }
- return 5000;
- }
- public:
- InsertBig() {
- char buf[200000];
- BSONObjBuilder b;
- b.append("x", 99);
- b.appendBinData("bin", 200000, (BinDataType) 129, buf);
- x = b.obj();
- }
- string name() { return "insert-big"; }
- void timed() {
- client()->insert( ns(), x );
- }
- };
-
- class InsertRandom : public B {
- public:
- virtual int howLongMillis() { return profiling ? 30000 : 5000; }
- string name() { return "random-inserts"; }
- void prep() {
- client()->insert( ns(), BSONObj() );
- ASSERT_OK(dbtests::createIndex(txn(), ns(), BSON("x"<<1)));
- }
- void timed() {
- int x = rand();
- BSONObj y = BSON("x" << x << "y" << rand() << "z" << 33);
- client()->insert(ns(), y);
- }
- };
+ }
+};
+
+class InsertBig : public B {
+ BSONObj x;
+ virtual int howLongMillis() {
+ if (sizeof(void*) == 4) {
+ // See SERVER-12556 - Running this test for some time causes occasional failures
+ // on Windows 32-bit, because the virtual address space is used up and remapping
+ // starts to fail. Value of zero means that only one iteration of the test
+ // will run.
+ //
+ return 0;
+ }
+ return 5000;
+ }
-    /** Upserts about 32k records, then keeps updating them. Uses 2 indexes. */
- class Update1 : public B {
- public:
- static int rand() {
- return std::rand() & 0x7fff;
- }
- virtual string name() { return "random-upserts"; }
- void prep() {
- client()->insert( ns(), BSONObj() );
- ASSERT_OK(dbtests::createIndex(txn(), ns(), BSON("x"<<1)));
- }
- void timed() {
- int x = rand();
- BSONObj q = BSON("x" << x);
- BSONObj y = BSON("x" << x << "y" << rand() << "z" << 33);
- client()->update(ns(), q, y, /*upsert*/true);
- }
- virtual bool testThreaded() { return true; }
- virtual string name2() {
- return name()+"-inc";
- }
+public:
+ InsertBig() {
+ char buf[200000];
+ BSONObjBuilder b;
+ b.append("x", 99);
+ b.appendBinData("bin", 200000, (BinDataType)129, buf);
+ x = b.obj();
+ }
+ string name() {
+ return "insert-big";
+ }
+ void timed() {
+ client()->insert(ns(), x);
+ }
+};
- virtual void timed2(DBClientBase* c) {
- static BSONObj I = BSON( "$inc" << BSON( "y" << 1 ) );
- // test some $inc's
- int x = rand();
- BSONObj q = BSON("x" << x);
- c->update(ns(), q, I);
- }
- };
-
- template <typename T>
- class MoreIndexes : public T {
- public:
- string name() { return T::name() + "-more-indexes"; }
- void prep() {
- T::prep();
- ASSERT_OK(dbtests::createIndex(this->txn(), this->ns(), BSON("y"<<1)));
- ASSERT_OK(dbtests::createIndex(this->txn(), this->ns(), BSON("z"<<1)));
- }
- };
-
- // Tests what the worst case is for the overhead of enabling a fail point. If 'fpInjected'
- // is false, then the fail point will be compiled out. If 'fpInjected' is true, then the
- // fail point will be compiled in. Since the conditioned block is more or less trivial, any
- // difference in performance is almost entirely attributable to the cost of checking
- // whether the failpoint is enabled.
- //
- // If fpEnabled is true, then the failpoint will be enabled, using the 'nTimes' model since
- // this looks to be the most expensive code path through the fail point enable detection
- // logic.
- //
- // It makes no sense to trigger the slow path if the fp is not injected, so that will fail
- // to compile.
- template <bool fpInjected, bool fpEnabled>
- class FailPointTest : public B {
- public:
-
- BOOST_STATIC_ASSERT(fpInjected || !fpEnabled);
-
- FailPointTest()
- : B()
- , _value(0) {
- if (fpEnabled) {
- _fp.setMode(
- FailPoint::nTimes,
- std::numeric_limits<FailPoint::ValType>::max());
- verify(_fp.shouldFail());
- } else {
- verify(!_fp.shouldFail());
- }
- }
+class InsertRandom : public B {
+public:
+ virtual int howLongMillis() {
+ return profiling ? 30000 : 5000;
+ }
+ string name() {
+ return "random-inserts";
+ }
+ void prep() {
+ client()->insert(ns(), BSONObj());
+ ASSERT_OK(dbtests::createIndex(txn(), ns(), BSON("x" << 1)));
+ }
+ void timed() {
+ int x = rand();
+ BSONObj y = BSON("x" << x << "y" << rand() << "z" << 33);
+ client()->insert(ns(), y);
+ }
+};
- virtual string name() {
- return std::string("failpoint")
- + (fpInjected ? "-present" : "-absent")
- + (fpInjected ? (fpEnabled ? "-enabled" : "-disabled") : "");
- }
+/** Upserts about 32k records, then keeps updating them. Uses 2 indexes. */
+class Update1 : public B {
+public:
+ static int rand() {
+ return std::rand() & 0x7fff;
+ }
+ virtual string name() {
+ return "random-upserts";
+ }
+ void prep() {
+ client()->insert(ns(), BSONObj());
+ ASSERT_OK(dbtests::createIndex(txn(), ns(), BSON("x" << 1)));
+ }
+ void timed() {
+ int x = rand();
+ BSONObj q = BSON("x" << x);
+ BSONObj y = BSON("x" << x << "y" << rand() << "z" << 33);
+ client()->update(ns(), q, y, /*upsert*/ true);
+ }
+ virtual bool testThreaded() {
+ return true;
+ }
+ virtual string name2() {
+ return name() + "-inc";
+ }
- virtual int howLongMillis() { return 5000; }
- virtual bool showDurStats() { return false; }
+ virtual void timed2(DBClientBase* c) {
+ static BSONObj I = BSON("$inc" << BSON("y" << 1));
+ // test some $inc's
+ int x = rand();
+ BSONObj q = BSON("x" << x);
+ c->update(ns(), q, I);
+ }
+};
- virtual void timed() {
- if (MONGO_unlikely(_value != 0) || (fpInjected && MONGO_FAIL_POINT(_fp))) {
- // We should only get here if the failpoint is enabled.
- verify(fpEnabled);
- }
+template <typename T>
+class MoreIndexes : public T {
+public:
+ string name() {
+ return T::name() + "-more-indexes";
+ }
+ void prep() {
+ T::prep();
+ ASSERT_OK(dbtests::createIndex(this->txn(), this->ns(), BSON("y" << 1)));
+ ASSERT_OK(dbtests::createIndex(this->txn(), this->ns(), BSON("z" << 1)));
+ }
+};
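+// MoreIndexes is a mixin: MoreIndexes<InsertRandom> re-runs InsertRandom's
+// workload after adding secondary indexes on 'y' and 'z', so the delta
+// against the plain test isolates the cost of index maintenance.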
+
+// Tests what the worst case is for the overhead of enabling a fail point. If 'fpInjected'
+// is false, then the fail point will be compiled out. If 'fpInjected' is true, then the
+// fail point will be compiled in. Since the conditioned block is more or less trivial, any
+// difference in performance is almost entirely attributable to the cost of checking
+// whether the failpoint is enabled.
+//
+// If fpEnabled is true, then the failpoint will be enabled, using the 'nTimes' model since
+// this looks to be the most expensive code path through the fail point enable detection
+// logic.
+//
+// It makes no sense to trigger the slow path if the fp is not injected, so that will fail
+// to compile.
+template <bool fpInjected, bool fpEnabled>
+class FailPointTest : public B {
+public:
+ BOOST_STATIC_ASSERT(fpInjected || !fpEnabled);
+
+ FailPointTest() : B(), _value(0) {
+ if (fpEnabled) {
+ _fp.setMode(FailPoint::nTimes, std::numeric_limits<FailPoint::ValType>::max());
+ verify(_fp.shouldFail());
+ } else {
+ verify(!_fp.shouldFail());
}
+ }
- virtual string name2() {
- // Will inhibit running 'timed2' as its own test, but will cause it to be run as a
- // threaded test.
- return name();
- }
+ virtual string name() {
+ return std::string("failpoint") + (fpInjected ? "-present" : "-absent") +
+ (fpInjected ? (fpEnabled ? "-enabled" : "-disabled") : "");
+ }
- virtual void timed2(DBClientBase*) {
-            // We just want to re-run 'timed' when timed2 is invoked as a threaded test, so we
-            // invoke 'timed' statically to avoid the overhead of a virtual function call.
- this->FailPointTest::timed();
- }
+ virtual int howLongMillis() {
+ return 5000;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
- virtual bool testThreaded() {
- return true;
+ virtual void timed() {
+ if (MONGO_unlikely(_value != 0) || (fpInjected && MONGO_FAIL_POINT(_fp))) {
+ // We should only get here if the failpoint is enabled.
+ verify(fpEnabled);
}
+ }
- private:
- // The failpoint under test.
- FailPoint _fp;
-
- // _value should always be zero for this test to behave as expected, but we don't want
- // the compiler exploiting this fact to compile out our check, so mark it volatile.
- const volatile int _value;
- };
-
- void t() {
- for( int i = 0; i < 20; i++ ) {
- sleepmillis(21);
- string fn = "/tmp/t1";
- DurableMappedFile f;
- unsigned long long len = 1 * 1024 * 1024;
- verify( f.create(fn, len, /*sequential*/rand()%2==0) );
- {
- char *p = (char *) f.getView();
- verify(p);
- // write something to the private view as a test
- strcpy(p, "hello");
- }
- if (storageGlobalParams.dur) {
- char *w = (char *) f.view_write();
- strcpy(w + 6, "world");
- }
- MongoFileFinder ff;
- ASSERT( ff.findByPath(fn) );
- }
+ virtual string name2() {
+ // Will inhibit running 'timed2' as its own test, but will cause it to be run as a
+ // threaded test.
+ return name();
}
- class StatusTestBase : public B {
- public:
- StatusTestBase()
- : _message("Some string data that should not fit in a short string optimization") {
- }
+ virtual void timed2(DBClientBase*) {
+        // We just want to re-run 'timed' when timed2 is invoked as a threaded test, so we
+        // invoke 'timed' statically to avoid the overhead of a virtual function call.
+ this->FailPointTest::timed();
+ }
- virtual int howLongMillis() { return 2000; }
- virtual bool showDurStats() { return false; }
- protected:
- NOINLINE_DECL Status doThingOK() const {
- return Status::OK();
- }
+ virtual bool testThreaded() {
+ return true;
+ }
- NOINLINE_DECL Status doThingNotOK() const{
- return Status(
- ErrorCodes::InternalError,
- _message,
- 42);
- }
- private:
- const std::string _message;
- };
-
- class ReturnOKStatus : public StatusTestBase {
- public:
- string name() { return "return-ok-status"; }
- void timed() {
- doThingOK();
- }
- };
+private:
+ // The failpoint under test.
+ FailPoint _fp;
+
+ // _value should always be zero for this test to behave as expected, but we don't want
+ // the compiler exploiting this fact to compile out our check, so mark it volatile.
+ const volatile int _value;
+};
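+// For context, a fail point is normally declared at namespace scope and
+// checked at the instrumented call site. A minimal sketch (the fail point
+// name 'benchFailPoint' here is hypothetical):
+//
+//     MONGO_FP_DECLARE(benchFailPoint);
+//
+//     void instrumentedOperation() {
+//         if (MONGO_FAIL_POINT(benchFailPoint)) {
+//             // Slow path: runs only while the fail point is enabled.
+//         }
+//         // Fast path: the benchmark above measures the cost of this check.
+//     }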
+
+void t() {
+ for (int i = 0; i < 20; i++) {
+ sleepmillis(21);
+ string fn = "/tmp/t1";
+ DurableMappedFile f;
+ unsigned long long len = 1 * 1024 * 1024;
+ verify(f.create(fn, len, /*sequential*/ rand() % 2 == 0));
+ {
+ char* p = (char*)f.getView();
+ verify(p);
+ // write something to the private view as a test
+ strcpy(p, "hello");
+ }
+ if (storageGlobalParams.dur) {
+ char* w = (char*)f.view_write();
+ strcpy(w + 6, "world");
+ }
+ MongoFileFinder ff;
+ ASSERT(ff.findByPath(fn));
+ }
+}
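+// t() runs concurrently with the whole suite (All::run below starts it on a
+// boost::thread) so that mapped-file creation, the private view, and the
+// write view are exercised while the benchmarks are running.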
- class ReturnNotOKStatus : public StatusTestBase {
- public:
- string name() { return "return-not-ok-status"; }
- void timed() {
- doThingNotOK();
- }
- };
+class StatusTestBase : public B {
+public:
+ StatusTestBase()
+ : _message("Some string data that should not fit in a short string optimization") {}
- class CopyOKStatus : public StatusTestBase {
- public:
- CopyOKStatus()
- : _status(doThingOK()) {}
+ virtual int howLongMillis() {
+ return 2000;
+ }
+ virtual bool showDurStats() {
+ return false;
+ }
- string name() { return "copy-ok-status"; }
- void timed() {
- const Status copy = _status;
- }
+protected:
+ NOINLINE_DECL Status doThingOK() const {
+ return Status::OK();
+ }
- private:
- const Status _status;
- };
+ NOINLINE_DECL Status doThingNotOK() const {
+ return Status(ErrorCodes::InternalError, _message, 42);
+ }
- class CopyNotOKStatus : public StatusTestBase {
- public:
- CopyNotOKStatus()
- : _status(doThingNotOK()) {}
+private:
+ const std::string _message;
+};
- string name() { return "copy-not-ok-status"; }
- void timed() {
- const Status copy = _status;
- }
+class ReturnOKStatus : public StatusTestBase {
+public:
+ string name() {
+ return "return-ok-status";
+ }
+ void timed() {
+ doThingOK();
+ }
+};
+
+class ReturnNotOKStatus : public StatusTestBase {
+public:
+ string name() {
+ return "return-not-ok-status";
+ }
+ void timed() {
+ doThingNotOK();
+ }
+};
- private:
- const Status _status;
- };
+class CopyOKStatus : public StatusTestBase {
+public:
+ CopyOKStatus() : _status(doThingOK()) {}
+
+ string name() {
+ return "copy-ok-status";
+ }
+ void timed() {
+ const Status copy = _status;
+ }
+
+private:
+ const Status _status;
+};
+
+class CopyNotOKStatus : public StatusTestBase {
+public:
+ CopyNotOKStatus() : _status(doThingNotOK()) {}
+
+ string name() {
+ return "copy-not-ok-status";
+ }
+ void timed() {
+ const Status copy = _status;
+ }
+
+private:
+ const Status _status;
+};
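+// The copy benchmarks above are meant to isolate Status's copy cost: an OK
+// Status carries no error object, so copying it is expected to be trivial,
+// while copying a non-OK Status is expected to bump a shared reference count
+// rather than re-allocate the (deliberately long) message string.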
#if __cplusplus >= 201103L
- class StatusMoveTestBase : public StatusTestBase {
- public:
- StatusMoveTestBase(bool ok)
- : StatusTestBase()
- , _a(ok ? doThingOK() : doThingNotOK())
- , _b(_a.isOK() ? Status::OK() : Status(_a.code(), _a.reason().c_str(), _a.location())) {
- }
+class StatusMoveTestBase : public StatusTestBase {
+public:
+ StatusMoveTestBase(bool ok)
+ : StatusTestBase(),
+ _a(ok ? doThingOK() : doThingNotOK()),
+ _b(_a.isOK() ? Status::OK() : Status(_a.code(), _a.reason().c_str(), _a.location())) {}
+
+ void timed() {
+ Status temp(std::move(_a));
+ _a = std::move(_b);
+ _b = std::move(temp);
+ }
- void timed() {
- Status temp(std::move(_a));
- _a = std::move(_b);
- _b = std::move(temp);
- }
+protected:
+ Status _a;
+ Status _b;
+};
+
+class MoveOKStatus : public StatusMoveTestBase {
+public:
+ MoveOKStatus() : StatusMoveTestBase(true) {}
+ string name() {
+ return "move-ok-status";
+ }
+};
- protected:
- Status _a;
- Status _b;
- };
-
- class MoveOKStatus : public StatusMoveTestBase {
- public:
- MoveOKStatus()
- : StatusMoveTestBase(true) {}
- string name() { return "move-ok-status"; }
- };
-
- class MoveNotOKStatus : public StatusMoveTestBase {
- public:
- MoveNotOKStatus()
- : StatusMoveTestBase(false) {}
- string name() { return "move-not-ok-status"; }
- };
+class MoveNotOKStatus : public StatusMoveTestBase {
+public:
+ MoveNotOKStatus() : StatusMoveTestBase(false) {}
+ string name() {
+ return "move-not-ok-status";
+ }
+};
#endif
- class All : public Suite {
- public:
- All() : Suite( "perf" ) { }
+class All : public Suite {
+public:
+ All() : Suite("perf") {}
- Result * run( const string& filter, int runsPerTest ) {
- boost::thread a(t);
- Result * res = Suite::run(filter, runsPerTest);
- a.join();
- return res;
- }
+ Result* run(const string& filter, int runsPerTest) {
+ boost::thread a(t);
+ Result* res = Suite::run(filter, runsPerTest);
+ a.join();
+ return res;
+ }
- void setupTests() {
- pstatsConnect();
- cout
- << "stats test rps------ time-- "
- << dur::stats.curr()->_CSVHeader() << endl;
- if( profiling ) {
- add< Insert1 >();
- }
- else {
- add< Dummy >();
- add< ChecksumTest >();
- add< Compress >();
- add< TLS >();
+ void setupTests() {
+ pstatsConnect();
+ cout << "stats test rps------ time-- "
+ << dur::stats.curr()->_CSVHeader() << endl;
+ if (profiling) {
+ add<Insert1>();
+ } else {
+ add<Dummy>();
+ add<ChecksumTest>();
+ add<Compress>();
+ add<TLS>();
#if defined(_WIN32)
- add< TLS2 >();
+ add<TLS2>();
#endif
- add< New8 >();
- add< New128 >();
- add< Throw< thr1 > >();
- add< Throw< thr2 > >();
- add< Throw< thr3 > >();
+ add<New8>();
+ add<New128>();
+ add<Throw<thr1>>();
+ add<Throw<thr2>>();
+ add<Throw<thr3>>();
#if !defined(__clang__) || !defined(MONGO_OPTIMIZED_BUILD)
- // clang-3.2 (and earlier?) miscompiles this test when optimization is on (see
- // SERVER-9767 and SERVER-11183 for additional details, including a link to the
- // LLVM ticket and LLVM fix).
- //
- // Ideally, the test above would also say
- // || (__clang_major__ > 3) || ((__clang_major__ == 3) && (__clang_minor__ > 2))
-                // so that the test would still run on known good versions of clang; see
- // comments in SERVER-11183 for why that doesn't work.
- //
- // TODO: Remove this when we no longer need to support clang-3.2. We should
-                // also consider requiring clang > 3.2 in our configure tests once Xcode 5 is
-                // ubiquitous.
- add< Throw< thr4 > >();
+ // clang-3.2 (and earlier?) miscompiles this test when optimization is on (see
+ // SERVER-9767 and SERVER-11183 for additional details, including a link to the
+ // LLVM ticket and LLVM fix).
+ //
+ // Ideally, the test above would also say
+ // || (__clang_major__ > 3) || ((__clang_major__ == 3) && (__clang_minor__ > 2))
+            // so that the test would still run on known good versions of clang; see
+ // comments in SERVER-11183 for why that doesn't work.
+ //
+ // TODO: Remove this when we no longer need to support clang-3.2. We should
+            // also consider requiring clang > 3.2 in our configure tests once Xcode 5 is
+            // ubiquitous.
+ add<Throw<thr4>>();
#endif
- add< Timer >();
- add< Sleep0Ms >();
+ add<Timer>();
+ add<Sleep0Ms>();
#if defined(__USE_XOPEN2K)
- add< Yield >();
+ add<Yield>();
#endif
- add< rlock >();
- add< wlock >();
- add< glockerIX > ();
- add< glockerIS > ();
- add< locker_contestedX >();
- add< locker_uncontestedX >();
- add< locker_contestedS >();
- add< locker_uncontestedS >();
- add< NotifyOne >();
- add< mutexspeed >();
- add< simplemutexspeed >();
- add< boostmutexspeed >();
- add< boosttimed_mutexspeed >();
+ add<rlock>();
+ add<wlock>();
+ add<glockerIX>();
+ add<glockerIS>();
+ add<locker_contestedX>();
+ add<locker_uncontestedX>();
+ add<locker_contestedS>();
+ add<locker_uncontestedS>();
+ add<NotifyOne>();
+ add<mutexspeed>();
+ add<simplemutexspeed>();
+ add<boostmutexspeed>();
+ add<boosttimed_mutexspeed>();
#if (__cplusplus >= 201103L)
- add< stdmutexspeed >();
- add< stdtimed_mutexspeed >();
+ add<stdmutexspeed>();
+ add<stdtimed_mutexspeed>();
#endif
- add< spinlockspeed >();
+ add<spinlockspeed>();
#ifdef RUNCOMPARESWAP
- add< casspeed >();
+ add<casspeed>();
#endif
- add< CTM >();
- add< CTMicros >();
- add< KeyTest >();
- add< Bldr >();
- add< StkBldr >();
- add< BSONIter >();
- add< BSONGetFields1 >();
- add< BSONGetFields2 >();
- //add< TaskQueueTest >();
- add< InsertDup >();
- add< Insert1 >();
- add< InsertRandom >();
- add< MoreIndexes<InsertRandom> >();
- add< Update1 >();
- add< MoreIndexes<Update1> >();
- add< InsertBig >();
- add< FailPointTest<false, false> >();
- add< FailPointTest<true, false> >();
- add< FailPointTest<true, true> >();
-
- add< ReturnOKStatus >();
- add< ReturnNotOKStatus >();
- add< CopyOKStatus >();
- add< CopyNotOKStatus >();
+ add<CTM>();
+ add<CTMicros>();
+ add<KeyTest>();
+ add<Bldr>();
+ add<StkBldr>();
+ add<BSONIter>();
+ add<BSONGetFields1>();
+ add<BSONGetFields2>();
+ // add< TaskQueueTest >();
+ add<InsertDup>();
+ add<Insert1>();
+ add<InsertRandom>();
+ add<MoreIndexes<InsertRandom>>();
+ add<Update1>();
+ add<MoreIndexes<Update1>>();
+ add<InsertBig>();
+ add<FailPointTest<false, false>>();
+ add<FailPointTest<true, false>>();
+ add<FailPointTest<true, true>>();
+
+ add<ReturnOKStatus>();
+ add<ReturnNotOKStatus>();
+ add<CopyOKStatus>();
+ add<CopyNotOKStatus>();
#if __cplusplus >= 201103L
- add< MoveOKStatus >();
- add< MoveNotOKStatus >();
+ add<MoveOKStatus>();
+ add<MoveNotOKStatus>();
#endif
- }
}
- } myall;
+ }
+} myall;
}
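
Each benchmark registered above follows the same harness contract: prep() runs
once before timing, timed() is looped for roughly howLongMillis(), and post()
verifies side effects afterwards. A minimal sketch of such a driver loop,
assuming only mongo::Timer (runBench and its reporting are illustrative, not
the actual harness, which also handles threading via timed2 and durability
stats):

    // Illustrative driver only, not the harness in perftests.cpp.
    template <typename Bench>
    long long runBench(int millis) {
        Bench b;
        b.prep();                      // one-time setup, not timed
        mongo::Timer t;
        long long iterations = 0;
        while (t.millis() < millis) {  // loop the measured body
            b.timed();
            ++iterations;
        }
        b.post();                      // correctness checks after timing
        return iterations;             // rps is derived from this count
    }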
diff --git a/src/mongo/dbtests/pipelinetests.cpp b/src/mongo/dbtests/pipelinetests.cpp
index 8a1b106279f..0546933037b 100644
--- a/src/mongo/dbtests/pipelinetests.cpp
+++ b/src/mongo/dbtests/pipelinetests.cpp
@@ -39,406 +39,444 @@
namespace PipelineTests {
- using boost::intrusive_ptr;
- using std::string;
- using std::vector;
-
- namespace FieldPath {
-
- using mongo::FieldPath;
-
- /** FieldPath constructed from empty string. */
- class Empty {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "" ), UserException );
- }
- };
-
- /** FieldPath constructed from empty vector. */
- class EmptyVector {
- public:
- void run() {
- vector<string> vec;
- ASSERT_THROWS( FieldPath path( vec ), MsgAssertionException );
- }
- };
-
- /** FieldPath constructed from a simple string (without dots). */
- class Simple {
- public:
- void run() {
- FieldPath path( "foo" );
- ASSERT_EQUALS( 1U, path.getPathLength() );
- ASSERT_EQUALS( "foo", path.getFieldName( 0 ) );
- ASSERT_EQUALS( "foo", path.getPath( false ) );
- ASSERT_EQUALS( "$foo", path.getPath( true ) );
- }
- };
-
- /** FieldPath constructed from a single element vector. */
- class SimpleVector {
- public:
- void run() {
- vector<string> vec( 1, "foo" );
- FieldPath path( vec );
- ASSERT_EQUALS( 1U, path.getPathLength() );
- ASSERT_EQUALS( "foo", path.getFieldName( 0 ) );
- ASSERT_EQUALS( "foo", path.getPath( false ) );
- }
- };
-
- /** FieldPath consisting of a '$' character. */
- class DollarSign {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "$" ), UserException );
- }
- };
-
- /** FieldPath with a '$' prefix. */
- class DollarSignPrefix {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "$a" ), UserException );
- }
- };
-
- /** FieldPath constructed from a string with one dot. */
- class Dotted {
- public:
- void run() {
- FieldPath path( "foo.bar" );
- ASSERT_EQUALS( 2U, path.getPathLength() );
- ASSERT_EQUALS( "foo", path.getFieldName( 0 ) );
- ASSERT_EQUALS( "bar", path.getFieldName( 1 ) );
- ASSERT_EQUALS( "foo.bar", path.getPath( false ) );
- ASSERT_EQUALS( "$foo.bar", path.getPath( true ) );
- }
- };
-
- /** FieldPath constructed from a single element vector containing a dot. */
- class VectorWithDot {
- public:
- void run() {
- vector<string> vec( 1, "fo.o" );
- ASSERT_THROWS( FieldPath path( vec ), UserException );
- }
- };
-
- /** FieldPath constructed from a two element vector. */
- class TwoFieldVector {
- public:
- void run() {
- vector<string> vec;
- vec.push_back( "foo" );
- vec.push_back( "bar" );
- FieldPath path( vec );
- ASSERT_EQUALS( 2U, path.getPathLength() );
- ASSERT_EQUALS( "foo.bar", path.getPath( false ) );
- }
- };
-
- /** FieldPath with a '$' prefix in the second field. */
- class DollarSignPrefixSecondField {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "a.$b" ), UserException );
- }
- };
-
- /** FieldPath constructed from a string with two dots. */
- class TwoDotted {
- public:
- void run() {
- FieldPath path( "foo.bar.baz" );
- ASSERT_EQUALS( 3U, path.getPathLength() );
- ASSERT_EQUALS( "foo", path.getFieldName( 0 ) );
- ASSERT_EQUALS( "bar", path.getFieldName( 1 ) );
- ASSERT_EQUALS( "baz", path.getFieldName( 2 ) );
- ASSERT_EQUALS( "foo.bar.baz", path.getPath( false ) );
- }
- };
-
- /** FieldPath constructed from a string ending in a dot. */
- class TerminalDot {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "foo." ), UserException );
- }
- };
-
- /** FieldPath constructed from a string beginning with a dot. */
- class PrefixDot {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( ".foo" ), UserException );
- }
- };
-
- /** FieldPath constructed from a string with adjacent dots. */
- class AdjacentDots {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "foo..bar" ), UserException );
- }
- };
-
- /** FieldPath constructed from a string with one letter between two dots. */
- class LetterBetweenDots {
- public:
- void run() {
- FieldPath path( "foo.a.bar" );
- ASSERT_EQUALS( 3U, path.getPathLength() );
- ASSERT_EQUALS( "foo.a.bar", path.getPath( false ) );
- }
- };
-
- /** FieldPath containing a null character. */
- class NullCharacter {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( string( "foo.b\0r", 7 ) ), UserException );
- }
- };
-
- /** FieldPath constructed with a vector containing a null character. */
- class VectorNullCharacter {
- public:
- void run() {
- vector<string> vec;
- vec.push_back( "foo" );
- vec.push_back( string( "b\0r", 3 ) );
- ASSERT_THROWS( FieldPath path( vec ), UserException );
- }
- };
-
- /** Tail of a FieldPath. */
- class Tail {
- public:
- void run() {
- FieldPath path = FieldPath( "foo.bar" ).tail();
- ASSERT_EQUALS( 1U, path.getPathLength() );
- ASSERT_EQUALS( "bar", path.getPath( false ) );
- }
- };
-
- /** Tail of a FieldPath with three fields. */
- class TailThreeFields {
- public:
- void run() {
- FieldPath path = FieldPath( "foo.bar.baz" ).tail();
- ASSERT_EQUALS( 2U, path.getPathLength() );
- ASSERT_EQUALS( "bar.baz", path.getPath( false ) );
- }
- };
-
- } // namespace FieldPath
-
- namespace Optimizations {
- using namespace mongo;
-
- namespace Sharded {
- class Base {
- public:
- // These all return json arrays of pipeline operators
- virtual string inputPipeJson() = 0;
- virtual string shardPipeJson() = 0;
- virtual string mergePipeJson() = 0;
-
- BSONObj pipelineFromJsonArray(const string& array) {
- return fromjson("{pipeline: " + array + "}");
- }
- virtual void run() {
- const BSONObj inputBson = pipelineFromJsonArray(inputPipeJson());
- const BSONObj shardPipeExpected = pipelineFromJsonArray(shardPipeJson());
- const BSONObj mergePipeExpected = pipelineFromJsonArray(mergePipeJson());
-
- intrusive_ptr<ExpressionContext> ctx =
- new ExpressionContext(&_opCtx, NamespaceString("a.collection"));
- string errmsg;
- intrusive_ptr<Pipeline> mergePipe =
- Pipeline::parseCommand(errmsg, inputBson, ctx);
- ASSERT_EQUALS(errmsg, "");
- ASSERT(mergePipe != NULL);
-
- intrusive_ptr<Pipeline> shardPipe = mergePipe->splitForSharded();
- ASSERT(shardPipe != NULL);
-
- ASSERT_EQUALS(shardPipe->serialize()["pipeline"],
- Value(shardPipeExpected["pipeline"]));
- ASSERT_EQUALS(mergePipe->serialize()["pipeline"],
- Value(mergePipeExpected["pipeline"]));
- }
-
- virtual ~Base() {};
-
- private:
- OperationContextImpl _opCtx;
- };
-
- // General test to make sure all optimizations support empty pipelines
- class Empty : public Base {
- string inputPipeJson() { return "[]"; }
- string shardPipeJson() { return "[]"; }
- string mergePipeJson() { return "[]"; }
- };
-
- namespace moveFinalUnwindFromShardsToMerger {
-
- class OneUnwind : public Base {
-                string inputPipeJson() { return "[{$unwind: '$a'}]"; }
-                string shardPipeJson() { return "[]"; }
-                string mergePipeJson() { return "[{$unwind: '$a'}]"; }
- };
-
- class TwoUnwind : public Base {
-                string inputPipeJson() { return "[{$unwind: '$a'}, {$unwind: '$b'}]"; }
-                string shardPipeJson() { return "[]"; }
-                string mergePipeJson() { return "[{$unwind: '$a'}, {$unwind: '$b'}]"; }
- };
-
- class UnwindNotFinal : public Base {
-                string inputPipeJson() { return "[{$unwind: '$a'}, {$match: {a:1}}]"; }
-                string shardPipeJson() { return "[{$unwind: '$a'}, {$match: {a:1}}]"; }
-                string mergePipeJson() { return "[]"; }
- };
-
- class UnwindWithOther : public Base {
-                string inputPipeJson() { return "[{$match: {a:1}}, {$unwind: '$a'}]"; }
-                string shardPipeJson() { return "[{$match: {a:1}}]"; }
-                string mergePipeJson() { return "[{$unwind: '$a'}]"; }
- };
- } // namespace moveFinalUnwindFromShardsToMerger
-
-
- namespace limitFieldsSentFromShardsToMerger {
- // These tests use $limit to split the pipelines between shards and merger as it is
- // always a split point and neutral in terms of needed fields.
-
- class NeedWholeDoc : public Base {
- string inputPipeJson() { return "[{$limit:1}]"; }
- string shardPipeJson() { return "[{$limit:1}]"; }
- string mergePipeJson() { return "[{$limit:1}]"; }
- };
-
- class JustNeedsId : public Base {
- string inputPipeJson() { return "[{$limit:1}, {$group: {_id: '$_id'}}]"; }
- string shardPipeJson() { return "[{$limit:1}, {$project: {_id:true}}]"; }
- string mergePipeJson() { return "[{$limit:1}, {$group: {_id: '$_id'}}]"; }
- };
-
- class JustNeedsNonId : public Base {
- string inputPipeJson() {
- return "[{$limit:1}, {$group: {_id: '$a.b'}}]";
- }
- string shardPipeJson() {
- return "[{$limit:1}, {$project: {_id: false, a: {b: true}}}]";
- }
- string mergePipeJson() {
- return "[{$limit:1}, {$group: {_id: '$a.b'}}]";
- }
- };
-
- class NothingNeeded : public Base {
- string inputPipeJson() {
- return "[{$limit:1}"
- ",{$group: {_id: {$const: null}, count: {$sum: {$const: 1}}}}"
- "]";
- }
- string shardPipeJson() {
- return "[{$limit:1}"
- ",{$project: {_id: true}}"
- "]";
- }
- string mergePipeJson() {
- return "[{$limit:1}"
- ",{$group: {_id: {$const: null}, count: {$sum: {$const: 1}}}}"
- "]";
- }
- };
-
- class JustNeedsMetadata : public Base {
- // Currently this optimization doesn't handle metadata and the shards assume it
- // needs to be propagated implicitly. Therefore the $project produced should be
- // the same as in NothingNeeded.
- string inputPipeJson() {
- return "[{$limit:1}, {$project: {_id: false, a: {$meta: 'textScore'}}}]";
- }
- string shardPipeJson() {
- return "[{$limit:1}, {$project: {_id: true}}]";
- }
- string mergePipeJson() {
- return "[{$limit:1}, {$project: {_id: false, a: {$meta: 'textScore'}}}]";
- }
- };
-
- class ShardAlreadyExhaustive : public Base {
- // No new project should be added. This test reflects current behavior where the
- // 'a' field is still sent because it is explicitly asked for, even though it
- // isn't actually needed. If this changes in the future, this test will need to
- // change.
- string inputPipeJson() {
- return "[{$project: {_id:true, a:true}}"
- ",{$limit:1}"
- ",{$group: {_id: '$_id'}}"
- "]";
- }
- string shardPipeJson() {
- return "[{$project: {_id:true, a:true}}"
- ",{$limit:1}"
- "]";
- }
- string mergePipeJson() {
- return "[{$limit:1}"
- ",{$group: {_id: '$_id'}}"
- "]";
- }
- };
-
- } // namespace limitFieldsSentFromShardsToMerger
- } // namespace Sharded
- } // namespace Optimizations
-
- class All : public Suite {
- public:
- All() : Suite( "pipeline" ) {
- }
- void setupTests() {
- add<FieldPath::Empty>();
- add<FieldPath::EmptyVector>();
- add<FieldPath::Simple>();
- add<FieldPath::SimpleVector>();
- add<FieldPath::DollarSign>();
- add<FieldPath::DollarSignPrefix>();
- add<FieldPath::Dotted>();
- add<FieldPath::VectorWithDot>();
- add<FieldPath::TwoFieldVector>();
- add<FieldPath::DollarSignPrefixSecondField>();
- add<FieldPath::TwoDotted>();
- add<FieldPath::TerminalDot>();
- add<FieldPath::PrefixDot>();
- add<FieldPath::AdjacentDots>();
- add<FieldPath::LetterBetweenDots>();
- add<FieldPath::NullCharacter>();
- add<FieldPath::VectorNullCharacter>();
- add<FieldPath::Tail>();
- add<FieldPath::TailThreeFields>();
-
- add<Optimizations::Sharded::Empty>();
- add<Optimizations::Sharded::moveFinalUnwindFromShardsToMerger::OneUnwind>();
- add<Optimizations::Sharded::moveFinalUnwindFromShardsToMerger::TwoUnwind>();
- add<Optimizations::Sharded::moveFinalUnwindFromShardsToMerger::UnwindNotFinal>();
- add<Optimizations::Sharded::moveFinalUnwindFromShardsToMerger::UnwindWithOther>();
- add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::NeedWholeDoc>();
- add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::JustNeedsId>();
- add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::JustNeedsNonId>();
- add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::NothingNeeded>();
- add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::JustNeedsMetadata>();
- add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::ShardAlreadyExhaustive>();
- }
- };
-
- SuiteInstance<All> myall;
-
-} // namespace PipelineTests
+using boost::intrusive_ptr;
+using std::string;
+using std::vector;
+
+namespace FieldPath {
+
+using mongo::FieldPath;
+
+/** FieldPath constructed from empty string. */
+class Empty {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path(""), UserException);
+ }
+};
+
+/** FieldPath constructed from empty vector. */
+class EmptyVector {
+public:
+ void run() {
+ vector<string> vec;
+ ASSERT_THROWS(FieldPath path(vec), MsgAssertionException);
+ }
+};
+
+/** FieldPath constructed from a simple string (without dots). */
+class Simple {
+public:
+ void run() {
+ FieldPath path("foo");
+ ASSERT_EQUALS(1U, path.getPathLength());
+ ASSERT_EQUALS("foo", path.getFieldName(0));
+ ASSERT_EQUALS("foo", path.getPath(false));
+ ASSERT_EQUALS("$foo", path.getPath(true));
+ }
+};
+
+/** FieldPath constructed from a single element vector. */
+class SimpleVector {
+public:
+ void run() {
+ vector<string> vec(1, "foo");
+ FieldPath path(vec);
+ ASSERT_EQUALS(1U, path.getPathLength());
+ ASSERT_EQUALS("foo", path.getFieldName(0));
+ ASSERT_EQUALS("foo", path.getPath(false));
+ }
+};
+
+/** FieldPath consisting of a '$' character. */
+class DollarSign {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("$"), UserException);
+ }
+};
+
+/** FieldPath with a '$' prefix. */
+class DollarSignPrefix {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("$a"), UserException);
+ }
+};
+
+/** FieldPath constructed from a string with one dot. */
+class Dotted {
+public:
+ void run() {
+ FieldPath path("foo.bar");
+ ASSERT_EQUALS(2U, path.getPathLength());
+ ASSERT_EQUALS("foo", path.getFieldName(0));
+ ASSERT_EQUALS("bar", path.getFieldName(1));
+ ASSERT_EQUALS("foo.bar", path.getPath(false));
+ ASSERT_EQUALS("$foo.bar", path.getPath(true));
+ }
+};
+
+/** FieldPath constructed from a single element vector containing a dot. */
+class VectorWithDot {
+public:
+ void run() {
+ vector<string> vec(1, "fo.o");
+ ASSERT_THROWS(FieldPath path(vec), UserException);
+ }
+};
+
+/** FieldPath constructed from a two element vector. */
+class TwoFieldVector {
+public:
+ void run() {
+ vector<string> vec;
+ vec.push_back("foo");
+ vec.push_back("bar");
+ FieldPath path(vec);
+ ASSERT_EQUALS(2U, path.getPathLength());
+ ASSERT_EQUALS("foo.bar", path.getPath(false));
+ }
+};
+
+/** FieldPath with a '$' prefix in the second field. */
+class DollarSignPrefixSecondField {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("a.$b"), UserException);
+ }
+};
+
+/** FieldPath constructed from a string with two dots. */
+class TwoDotted {
+public:
+ void run() {
+ FieldPath path("foo.bar.baz");
+ ASSERT_EQUALS(3U, path.getPathLength());
+ ASSERT_EQUALS("foo", path.getFieldName(0));
+ ASSERT_EQUALS("bar", path.getFieldName(1));
+ ASSERT_EQUALS("baz", path.getFieldName(2));
+ ASSERT_EQUALS("foo.bar.baz", path.getPath(false));
+ }
+};
+
+/** FieldPath constructed from a string ending in a dot. */
+class TerminalDot {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("foo."), UserException);
+ }
+};
+
+/** FieldPath constructed from a string beginning with a dot. */
+class PrefixDot {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path(".foo"), UserException);
+ }
+};
+
+/** FieldPath constructed from a string with adjacent dots. */
+class AdjacentDots {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("foo..bar"), UserException);
+ }
+};
+
+/** FieldPath constructed from a string with one letter between two dots. */
+class LetterBetweenDots {
+public:
+ void run() {
+ FieldPath path("foo.a.bar");
+ ASSERT_EQUALS(3U, path.getPathLength());
+ ASSERT_EQUALS("foo.a.bar", path.getPath(false));
+ }
+};
+
+/** FieldPath containing a null character. */
+class NullCharacter {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path(string("foo.b\0r", 7)), UserException);
+ }
+};
+
+/** FieldPath constructed with a vector containing a null character. */
+class VectorNullCharacter {
+public:
+ void run() {
+ vector<string> vec;
+ vec.push_back("foo");
+ vec.push_back(string("b\0r", 3));
+ ASSERT_THROWS(FieldPath path(vec), UserException);
+ }
+};
+
+/** Tail of a FieldPath. */
+class Tail {
+public:
+ void run() {
+ FieldPath path = FieldPath("foo.bar").tail();
+ ASSERT_EQUALS(1U, path.getPathLength());
+ ASSERT_EQUALS("bar", path.getPath(false));
+ }
+};
+
+/** Tail of a FieldPath with three fields. */
+class TailThreeFields {
+public:
+ void run() {
+ FieldPath path = FieldPath("foo.bar.baz").tail();
+ ASSERT_EQUALS(2U, path.getPathLength());
+ ASSERT_EQUALS("bar.baz", path.getPath(false));
+ }
+};
+
+} // namespace FieldPath
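+// Taken together, the cases above pin down FieldPath's validation rules: a
+// path is a non-empty, dot-separated list of fields; a field may not be
+// empty, may not contain '.' or a null byte, and may not begin with '$'.
+// For example, "foo.a.bar" parses into three fields, while "", "$a", "foo.",
+// ".foo", and "foo..bar" all throw.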
+
+namespace Optimizations {
+using namespace mongo;
+
+namespace Sharded {
+class Base {
+public:
+ // These all return json arrays of pipeline operators
+ virtual string inputPipeJson() = 0;
+ virtual string shardPipeJson() = 0;
+ virtual string mergePipeJson() = 0;
+
+ BSONObj pipelineFromJsonArray(const string& array) {
+ return fromjson("{pipeline: " + array + "}");
+ }
+ virtual void run() {
+ const BSONObj inputBson = pipelineFromJsonArray(inputPipeJson());
+ const BSONObj shardPipeExpected = pipelineFromJsonArray(shardPipeJson());
+ const BSONObj mergePipeExpected = pipelineFromJsonArray(mergePipeJson());
+
+ intrusive_ptr<ExpressionContext> ctx =
+ new ExpressionContext(&_opCtx, NamespaceString("a.collection"));
+ string errmsg;
+ intrusive_ptr<Pipeline> mergePipe = Pipeline::parseCommand(errmsg, inputBson, ctx);
+ ASSERT_EQUALS(errmsg, "");
+ ASSERT(mergePipe != NULL);
+
+ intrusive_ptr<Pipeline> shardPipe = mergePipe->splitForSharded();
+ ASSERT(shardPipe != NULL);
+
+ ASSERT_EQUALS(shardPipe->serialize()["pipeline"], Value(shardPipeExpected["pipeline"]));
+ ASSERT_EQUALS(mergePipe->serialize()["pipeline"], Value(mergePipeExpected["pipeline"]));
+ }
+
+    virtual ~Base() {}
+
+private:
+ OperationContextImpl _opCtx;
+};
+
+// General test to make sure all optimizations support empty pipelines
+class Empty : public Base {
+ string inputPipeJson() {
+ return "[]";
+ }
+ string shardPipeJson() {
+ return "[]";
+ }
+ string mergePipeJson() {
+ return "[]";
+ }
+};
+
+namespace moveFinalUnwindFromShardsToMerger {
+
+class OneUnwind : public Base {
+ string inputPipeJson() {
+        return "[{$unwind: '$a'}]";
+ }
+ string shardPipeJson() {
+        return "[]";
+ }
+ string mergePipeJson() {
+        return "[{$unwind: '$a'}]";
+ }
+};
+
+class TwoUnwind : public Base {
+ string inputPipeJson() {
+        return "[{$unwind: '$a'}, {$unwind: '$b'}]";
+ }
+ string shardPipeJson() {
+        return "[]";
+ }
+ string mergePipeJson() {
+        return "[{$unwind: '$a'}, {$unwind: '$b'}]";
+ }
+};
+
+class UnwindNotFinal : public Base {
+ string inputPipeJson() {
+        return "[{$unwind: '$a'}, {$match: {a:1}}]";
+ }
+ string shardPipeJson() {
+        return "[{$unwind: '$a'}, {$match: {a:1}}]";
+ }
+ string mergePipeJson() {
+        return "[]";
+ }
+};
+
+class UnwindWithOther : public Base {
+ string inputPipeJson() {
+        return "[{$match: {a:1}}, {$unwind: '$a'}]";
+ }
+ string shardPipeJson() {
+        return "[{$match: {a:1}}]";
+ }
+ string mergePipeJson() {
+        return "[{$unwind: '$a'}]";
+ }
+};
+} // namespace moveFinalUnwindFromShardsToMerger
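+// The rule exercised above: a maximal run of $unwind stages at the end of the
+// pipeline moves to the merger (OneUnwind, TwoUnwind), while an $unwind that
+// is followed by any other stage stays on the shards (UnwindNotFinal,
+// UnwindWithOther).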
+
+
+namespace limitFieldsSentFromShardsToMerger {
+// These tests use $limit to split the pipelines between shards and merger as it is
+// always a split point and neutral in terms of needed fields.
+
+class NeedWholeDoc : public Base {
+ string inputPipeJson() {
+ return "[{$limit:1}]";
+ }
+ string shardPipeJson() {
+ return "[{$limit:1}]";
+ }
+ string mergePipeJson() {
+ return "[{$limit:1}]";
+ }
+};
+
+class JustNeedsId : public Base {
+ string inputPipeJson() {
+ return "[{$limit:1}, {$group: {_id: '$_id'}}]";
+ }
+ string shardPipeJson() {
+ return "[{$limit:1}, {$project: {_id:true}}]";
+ }
+ string mergePipeJson() {
+ return "[{$limit:1}, {$group: {_id: '$_id'}}]";
+ }
+};
+
+class JustNeedsNonId : public Base {
+ string inputPipeJson() {
+ return "[{$limit:1}, {$group: {_id: '$a.b'}}]";
+ }
+ string shardPipeJson() {
+ return "[{$limit:1}, {$project: {_id: false, a: {b: true}}}]";
+ }
+ string mergePipeJson() {
+ return "[{$limit:1}, {$group: {_id: '$a.b'}}]";
+ }
+};
+
+class NothingNeeded : public Base {
+ string inputPipeJson() {
+ return "[{$limit:1}"
+ ",{$group: {_id: {$const: null}, count: {$sum: {$const: 1}}}}"
+ "]";
+ }
+ string shardPipeJson() {
+ return "[{$limit:1}"
+ ",{$project: {_id: true}}"
+ "]";
+ }
+ string mergePipeJson() {
+ return "[{$limit:1}"
+ ",{$group: {_id: {$const: null}, count: {$sum: {$const: 1}}}}"
+ "]";
+ }
+};
+
+class JustNeedsMetadata : public Base {
+ // Currently this optimization doesn't handle metadata and the shards assume it
+ // needs to be propagated implicitly. Therefore the $project produced should be
+ // the same as in NothingNeeded.
+ string inputPipeJson() {
+ return "[{$limit:1}, {$project: {_id: false, a: {$meta: 'textScore'}}}]";
+ }
+ string shardPipeJson() {
+ return "[{$limit:1}, {$project: {_id: true}}]";
+ }
+ string mergePipeJson() {
+ return "[{$limit:1}, {$project: {_id: false, a: {$meta: 'textScore'}}}]";
+ }
+};
+
+class ShardAlreadyExhaustive : public Base {
+ // No new project should be added. This test reflects current behavior where the
+ // 'a' field is still sent because it is explicitly asked for, even though it
+ // isn't actually needed. If this changes in the future, this test will need to
+ // change.
+ string inputPipeJson() {
+ return "[{$project: {_id:true, a:true}}"
+ ",{$limit:1}"
+ ",{$group: {_id: '$_id'}}"
+ "]";
+ }
+ string shardPipeJson() {
+ return "[{$project: {_id:true, a:true}}"
+ ",{$limit:1}"
+ "]";
+ }
+ string mergePipeJson() {
+ return "[{$limit:1}"
+ ",{$group: {_id: '$_id'}}"
+ "]";
+ }
+};
+
+} // namespace limitFieldsSentFromShardsToMerger
+} // namespace Sharded
+} // namespace Optimizations
+
+class All : public Suite {
+public:
+ All() : Suite("pipeline") {}
+ void setupTests() {
+ add<FieldPath::Empty>();
+ add<FieldPath::EmptyVector>();
+ add<FieldPath::Simple>();
+ add<FieldPath::SimpleVector>();
+ add<FieldPath::DollarSign>();
+ add<FieldPath::DollarSignPrefix>();
+ add<FieldPath::Dotted>();
+ add<FieldPath::VectorWithDot>();
+ add<FieldPath::TwoFieldVector>();
+ add<FieldPath::DollarSignPrefixSecondField>();
+ add<FieldPath::TwoDotted>();
+ add<FieldPath::TerminalDot>();
+ add<FieldPath::PrefixDot>();
+ add<FieldPath::AdjacentDots>();
+ add<FieldPath::LetterBetweenDots>();
+ add<FieldPath::NullCharacter>();
+ add<FieldPath::VectorNullCharacter>();
+ add<FieldPath::Tail>();
+ add<FieldPath::TailThreeFields>();
+
+ add<Optimizations::Sharded::Empty>();
+ add<Optimizations::Sharded::moveFinalUnwindFromShardsToMerger::OneUnwind>();
+ add<Optimizations::Sharded::moveFinalUnwindFromShardsToMerger::TwoUnwind>();
+ add<Optimizations::Sharded::moveFinalUnwindFromShardsToMerger::UnwindNotFinal>();
+ add<Optimizations::Sharded::moveFinalUnwindFromShardsToMerger::UnwindWithOther>();
+ add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::NeedWholeDoc>();
+ add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::JustNeedsId>();
+ add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::JustNeedsNonId>();
+ add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::NothingNeeded>();
+ add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::JustNeedsMetadata>();
+ add<Optimizations::Sharded::limitFieldsSentFromShardsToMerger::ShardAlreadyExhaustive>();
+ }
+};
+
+SuiteInstance<All> myall;
+
+} // namespace PipelineTests
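
Adding a new sharded-split case follows mechanically from Base above: override
the three JSON accessors and register the class in All::setupTests(). A sketch
under those assumptions (the class name MatchOnly and its expected split are
hypothetical, not part of the suite):

    class MatchOnly : public Optimizations::Sharded::Base {
        string inputPipeJson() {
            return "[{$match: {a: 1}}]";
        }
        string shardPipeJson() {  // a $match alone can run entirely on the shards
            return "[{$match: {a: 1}}]";
        }
        string mergePipeJson() {  // nothing remains for the merger
            return "[]";
        }
    };
    // ...and in All::setupTests(): add<Optimizations::Sharded::MatchOnly>();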
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index ce6fd53a7a2..6ddad38b15e 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -52,802 +52,773 @@
namespace mongo {
- // How we access the external setParameter testing bool.
- extern bool internalQueryForceIntersectionPlans;
+// How we access the external setParameter testing bool.
+extern bool internalQueryForceIntersectionPlans;
- extern bool internalQueryPlannerEnableHashIntersection;
+extern bool internalQueryPlannerEnableHashIntersection;
} // namespace mongo
namespace PlanRankingTests {
- using boost::scoped_ptr;
- using std::vector;
+using boost::scoped_ptr;
+using std::vector;
- static const char* ns = "unittests.PlanRankingTests";
+static const char* ns = "unittests.PlanRankingTests";
- class PlanRankingTestBase {
- public:
- PlanRankingTestBase()
- : _internalQueryForceIntersectionPlans(internalQueryForceIntersectionPlans),
- _enableHashIntersection(internalQueryPlannerEnableHashIntersection),
- _client(&_txn) {
+class PlanRankingTestBase {
+public:
+ PlanRankingTestBase()
+ : _internalQueryForceIntersectionPlans(internalQueryForceIntersectionPlans),
+ _enableHashIntersection(internalQueryPlannerEnableHashIntersection),
+ _client(&_txn) {
+ // Run all tests with hash-based intersection enabled.
+ internalQueryPlannerEnableHashIntersection = true;
- // Run all tests with hash-based intersection enabled.
- internalQueryPlannerEnableHashIntersection = true;
+ Client::WriteContext ctx(&_txn, ns);
+ _client.dropCollection(ns);
+ }
- Client::WriteContext ctx(&_txn, ns);
- _client.dropCollection(ns);
- }
+ virtual ~PlanRankingTestBase() {
+ // Restore external setParameter testing bools.
+ internalQueryForceIntersectionPlans = _internalQueryForceIntersectionPlans;
+ internalQueryPlannerEnableHashIntersection = _enableHashIntersection;
+ }
- virtual ~PlanRankingTestBase() {
- // Restore external setParameter testing bools.
- internalQueryForceIntersectionPlans = _internalQueryForceIntersectionPlans;
- internalQueryPlannerEnableHashIntersection = _enableHashIntersection;
- }
+ void insert(const BSONObj& obj) {
+ Client::WriteContext ctx(&_txn, ns);
+ _client.insert(ns, obj);
+ }
- void insert(const BSONObj& obj) {
- Client::WriteContext ctx(&_txn, ns);
- _client.insert(ns, obj);
- }
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, obj));
+ }
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns, obj));
+ /**
+ * Use the MultiPlanRunner to pick the best plan for the query 'cq'. Goes through
+ * normal planning to generate solutions and feeds them to the MPR.
+ *
+ * Takes ownership of 'cq'. Caller DOES NOT own the returned QuerySolution*.
+ */
+ QuerySolution* pickBestPlan(CanonicalQuery* cq) {
+ AutoGetCollectionForRead ctx(&_txn, ns);
+ Collection* collection = ctx.getCollection();
+
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_txn, collection, cq, &plannerParams);
+ // Turn this off otherwise it pops up in some plans.
+ plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;
+
+ // Plan.
+ vector<QuerySolution*> solutions;
+ Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
+ ASSERT(status.isOK());
+
+ ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U);
+
+ // Fill out the MPR.
+ _mps.reset(new MultiPlanStage(&_txn, collection, cq));
+ boost::scoped_ptr<WorkingSet> ws(new WorkingSet());
+ // Put each solution from the planner into the MPR.
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ PlanStage* root;
+ ASSERT(StageBuilder::build(&_txn, collection, *solutions[i], ws.get(), &root));
+ // Takes ownership of all (actually some) arguments.
+ _mps->addPlan(solutions[i], root, ws.get());
}
+ // This is what sets a backup plan, should we test for it. NULL means that there
+ // is no yield policy for this MultiPlanStage's plan selection.
+ _mps->pickBestPlan(NULL);
+ ASSERT(_mps->bestPlanChosen());
- /**
- * Use the MultiPlanRunner to pick the best plan for the query 'cq'. Goes through
- * normal planning to generate solutions and feeds them to the MPR.
- *
- * Takes ownership of 'cq'. Caller DOES NOT own the returned QuerySolution*.
- */
- QuerySolution* pickBestPlan(CanonicalQuery* cq) {
- AutoGetCollectionForRead ctx(&_txn, ns);
- Collection* collection = ctx.getCollection();
-
- QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq, &plannerParams);
- // Turn this off otherwise it pops up in some plans.
- plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;
-
- // Plan.
- vector<QuerySolution*> solutions;
- Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
- ASSERT(status.isOK());
-
- ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U);
-
- // Fill out the MPR.
- _mps.reset(new MultiPlanStage(&_txn, collection, cq));
- boost::scoped_ptr<WorkingSet> ws(new WorkingSet());
- // Put each solution from the planner into the MPR.
- for (size_t i = 0; i < solutions.size(); ++i) {
- PlanStage* root;
- ASSERT(StageBuilder::build(&_txn, collection, *solutions[i], ws.get(), &root));
- // Takes ownership of all (actually some) arguments.
- _mps->addPlan(solutions[i], root, ws.get());
- }
- // This is what sets a backup plan, should we test for it. NULL means that there
- // is no yield policy for this MultiPlanStage's plan selection.
- _mps->pickBestPlan(NULL);
- ASSERT(_mps->bestPlanChosen());
-
- size_t bestPlanIdx = _mps->bestPlanIdx();
- ASSERT_LESS_THAN(bestPlanIdx, solutions.size());
-
- // And return a pointer to the best solution.
- return _mps->bestSolution();
- }
+ size_t bestPlanIdx = _mps->bestPlanIdx();
+ ASSERT_LESS_THAN(bestPlanIdx, solutions.size());
- /**
- * Was a backup plan picked during the ranking process?
- */
- bool hasBackupPlan() const {
- ASSERT(NULL != _mps.get());
- return _mps->hasBackupPlan();
- }
+ // And return a pointer to the best solution.
+ return _mps->bestSolution();
+ }
- protected:
- // A large number, which must be larger than the number of times
- // candidate plans are worked by the multi plan runner. Used for
- // determining the number of documents in the tests below.
- static const int N;
+ /**
+ * Was a backup plan picked during the ranking process?
+ */
+ bool hasBackupPlan() const {
+ ASSERT(NULL != _mps.get());
+ return _mps->hasBackupPlan();
+ }
- OperationContextImpl _txn;
+protected:
+ // A large number, which must be larger than the number of times
+ // candidate plans are worked by the multi plan runner. Used for
+ // determining the number of documents in the tests below.
+ static const int N;
- private:
- // Holds the value of global "internalQueryForceIntersectionPlans" setParameter flag.
- // Restored at end of test invocation regardless of test result.
- bool _internalQueryForceIntersectionPlans;
+ OperationContextImpl _txn;
- // Holds the value of the global set parameter so it can be restored at the end
- // of the test.
- bool _enableHashIntersection;
+private:
+ // Holds the value of global "internalQueryForceIntersectionPlans" setParameter flag.
+ // Restored at end of test invocation regardless of test result.
+ bool _internalQueryForceIntersectionPlans;
- scoped_ptr<MultiPlanStage> _mps;
+ // Holds the value of the global set parameter so it can be restored at the end
+ // of the test.
+ bool _enableHashIntersection;
- DBDirectClient _client;
- };
+ scoped_ptr<MultiPlanStage> _mps;
- // static
- const int PlanRankingTestBase::N = internalQueryPlanEvaluationWorks + 1000;
+ DBDirectClient _client;
+};
- /**
- * Test that the "prefer ixisect" parameter works.
- */
- class PlanRankingIntersectOverride : public PlanRankingTestBase {
- public:
- void run() {
- // 'a' is very selective, 'b' is not.
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << i << "b" << 1));
- }
-
- // Add indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
-            // Run the query {a:100, b:1}.
- CanonicalQuery* cq;
- verify(CanonicalQuery::canonicalize(ns, BSON("a" << 100 << "b" << 1), &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // {a:100} is super selective so choose that.
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {filter: {b:1}, node: {ixscan: {pattern: {a: 1}}}}}",
- soln->root.get()));
-
- // Turn on the "force intersect" option.
- // This will be reverted by PlanRankingTestBase's destructor when the test completes.
- internalQueryForceIntersectionPlans = true;
-
- // And run the same query again.
- ASSERT(CanonicalQuery::canonicalize(ns, BSON("a" << 100 << "b" << 1), &cq).isOK());
- boost::scoped_ptr<CanonicalQuery> killCq2(cq);
-
- // With the "ranking picks ixisect always" option we pick an intersection plan that uses
- // both the {a:1} and {b:1} indices even though it performs poorly.
-
- // Takes ownership of cq.
- soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
- soln->root.get()));
- }
- };
+// static
+const int PlanRankingTestBase::N = internalQueryPlanEvaluationWorks + 1000;
- /**
- * Test that a hashed AND solution plan is picked along with a non-blocking backup solution.
- */
- class PlanRankingIntersectWithBackup : public PlanRankingTestBase {
- public:
- void run() {
- // 'a' is very selective, 'b' is not.
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << i << "b" << 1));
- }
-
- // Add indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
-            // Run the query {a:1, b:{$gt:1}}.
- CanonicalQuery* cq;
- verify(CanonicalQuery::canonicalize(ns, BSON("a" << 1 << "b" << BSON("$gt" << 1)),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Turn on the "force intersect" option.
- // This will be reverted by PlanRankingTestBase's destructor when the test completes.
- internalQueryForceIntersectionPlans = true;
-
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {andHash: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
- soln->root.get()));
-
- // Confirm that a backup plan is available.
- ASSERT(hasBackupPlan());
+/**
+ * Test that the "prefer ixisect" parameter works.
+ */
+class PlanRankingIntersectOverride : public PlanRankingTestBase {
+public:
+ void run() {
+ // 'a' is very selective, 'b' is not.
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << i << "b" << 1));
}
- };
- /**
- * Two plans hit EOF at the same time, but one is covered. Make sure that we prefer the covered
- * plan.
- */
- class PlanRankingPreferCovered : public PlanRankingTestBase {
- public:
- void run() {
- // Insert data {a:i, b:i}. Index {a:1} and {a:1, b:1}, query on 'a', projection on 'a'
- // and 'b'. Should prefer the second index as we can pull the 'b' data out.
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << i << "b" << i));
- }
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << 1 << "b" << 1));
-
- // Query for a==27 with projection that wants 'a' and 'b'. BSONObj() is for sort.
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- BSON("a" << 27),
- BSONObj(),
- BSON("_id" << 0 << "a" << 1 << "b" << 1),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
-
- // Prefer the fully covered plan.
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{proj: {spec: {_id:0, a:1, b:1}, node: {ixscan: {pattern: {a: 1, b:1}}}}}",
- soln->root.get()));
+ // Add indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+        // Run the query {a:100, b:1}.
+ CanonicalQuery* cq;
+ verify(CanonicalQuery::canonicalize(ns, BSON("a" << 100 << "b" << 1), &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // {a:100} is super selective so choose that.
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{fetch: {filter: {b:1}, node: {ixscan: {pattern: {a: 1}}}}}", soln->root.get()));
+
+ // Turn on the "force intersect" option.
+ // This will be reverted by PlanRankingTestBase's destructor when the test completes.
+ internalQueryForceIntersectionPlans = true;
+
+ // And run the same query again.
+ ASSERT(CanonicalQuery::canonicalize(ns, BSON("a" << 100 << "b" << 1), &cq).isOK());
+ boost::scoped_ptr<CanonicalQuery> killCq2(cq);
+
+ // With the "ranking picks ixisect always" option we pick an intersection plan that uses
+ // both the {a:1} and {b:1} indices even though it performs poorly.
+
+ // Takes ownership of cq.
+ soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{fetch: {node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
+ soln->root.get()));
+ }
+};
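+// The flag flipped above is also exposed as a server setParameter; e.g., from
+// the mongo shell:
+//
+//     db.adminCommand({setParameter: 1, internalQueryForceIntersectionPlans: true})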
+
+/**
+ * Test that a hashed AND solution plan is picked along with a non-blocking backup solution.
+ */
+class PlanRankingIntersectWithBackup : public PlanRankingTestBase {
+public:
+ void run() {
+ // 'a' is very selective, 'b' is not.
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << i << "b" << 1));
}
- };
- /**
- * No plan produces any results or hits EOF. In this case we should never choose an index
- * intersection solution.
- */
- class PlanRankingAvoidIntersectIfNoResults : public PlanRankingTestBase {
- public:
- void run() {
- // We insert lots of copies of {a:1, b:1, c: 20}. We have the indices {a:1} and {b:1},
-            // and the query is {a:1, b:1, c: 99}. No data matches the query, but we won't
-            // know that during plan ranking. We don't want to choose an intersection plan here.
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1 << "b" << 1 << "c" << 20));
- }
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- // There is no data that matches this query but we don't know that until EOF.
- CanonicalQuery* cq;
- BSONObj queryObj = BSON("a" << 1 << "b" << 1 << "c" << 99);
- ASSERT(CanonicalQuery::canonicalize(ns, queryObj, &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
-
- // Anti-prefer the intersection plan.
- bool bestIsScanOverA = QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}",
- soln->root.get());
- bool bestIsScanOverB = QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {ixscan: {pattern: {b: 1}}}}}",
- soln->root.get());
- ASSERT(bestIsScanOverA || bestIsScanOverB);
+ // Add indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // Run the query {a:1, b:{$gt:1}}.
+ CanonicalQuery* cq;
+ verify(CanonicalQuery::canonicalize(ns, BSON("a" << 1 << "b" << BSON("$gt" << 1)), &cq)
+ .isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Turn on the "force intersect" option.
+ // This will be reverted by PlanRankingTestBase's destructor when the test completes.
+ internalQueryForceIntersectionPlans = true;
+
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{fetch: {node: {andHash: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
+ soln->root.get()));
+
+ // Confirm that a backup plan is available.
+ ASSERT(hasBackupPlan());
+ }
+};
+
+/**
+ * Two plans hit EOF at the same time, but one is covered. Make sure that we prefer the covered
+ * plan.
+ */
+class PlanRankingPreferCovered : public PlanRankingTestBase {
+public:
+ void run() {
+ // Insert data {a:i, b:i}. Index {a:1} and {a:1, b:1}, query on 'a', projection on 'a'
+ // and 'b'. Should prefer the second index as we can pull the 'b' data out.
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << i << "b" << i));
}
- };
- /**
- * No plan produces any results or hits EOF. In this case we should prefer covered solutions to
- * non-covered solutions.
- */
- class PlanRankingPreferCoveredEvenIfNoResults : public PlanRankingTestBase {
- public:
- void run() {
- // We insert lots of copies of {a:1, b:1}. We have the indices {a:1} and {a:1, b:1},
- // the query is for a doc that doesn't exist, but there is a projection over 'a' and
- // 'b'. We should prefer the index that provides a covered query.
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1 << "b" << 1));
- }
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << 1 << "b" << 1));
-
- // There is no data that matches this query ({a:2}). Both scans will hit EOF before
- // returning any data.
-
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- BSON("a" << 2),
- BSONObj(),
- BSON("_id" << 0 << "a" << 1 << "b" << 1),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
- // Prefer the fully covered plan.
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{proj: {spec: {_id:0, a:1, b:1}, node: {ixscan: {pattern: {a: 1, b:1}}}}}",
- soln->root.get()));
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ // Query for a==27 with projection that wants 'a' and 'b'. BSONObj() is for sort.
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(
+ ns, BSON("a" << 27), BSONObj(), BSON("_id" << 0 << "a" << 1 << "b" << 1), &cq)
+ .isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+
+ // Prefer the fully covered plan.
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{proj: {spec: {_id:0, a:1, b:1}, node: {ixscan: {pattern: {a: 1, b:1}}}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * No plan produces any results or hits EOF. In this case we should never choose an index
+ * intersection solution.
+ */
+class PlanRankingAvoidIntersectIfNoResults : public PlanRankingTestBase {
+public:
+ void run() {
+ // We insert lots of copies of {a:1, b:1, c: 20}. We have the indices {a:1} and {b:1},
+ // and the query is {a:1, b:1, c: 99}. No data matches the query, but we won't
+ // know that during plan ranking. We don't want to choose an intersection plan here.
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 1 << "b" << 1 << "c" << 20));
}
- };
- /**
- * We have an index on "a" which is somewhat selective and an index on "b" which is highly
- * selective (will cause an immediate EOF). Make sure that a query with predicates on both "a"
- * and "b" will use the index on "b".
- */
- class PlanRankingPreferImmediateEOF : public PlanRankingTestBase {
- public:
- void run() {
- // 'a' is very selective, 'b' is not.
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << i << "b" << 1));
- }
-
- // Add indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- // Run the query {a:N+1, b:1}. (No such document.)
- CanonicalQuery* cq;
- verify(CanonicalQuery::canonicalize(ns, BSON("a" << N + 1 << "b" << 1), &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // {a: 100} is super selective so choose that.
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {filter: {b:1}, node: {ixscan: {pattern: {a: 1}}}}}",
- soln->root.get()));
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // There is no data that matches this query but we don't know that until EOF.
+ CanonicalQuery* cq;
+ BSONObj queryObj = BSON("a" << 1 << "b" << 1 << "c" << 99);
+ ASSERT(CanonicalQuery::canonicalize(ns, queryObj, &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+
+ // The intersection plan must not be chosen; either single-index plan is acceptable.
+ bool bestIsScanOverA = QueryPlannerTestLib::solutionMatches(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}", soln->root.get());
+ bool bestIsScanOverB = QueryPlannerTestLib::solutionMatches(
+ "{fetch: {node: {ixscan: {pattern: {b: 1}}}}}", soln->root.get());
+ ASSERT(bestIsScanOverA || bestIsScanOverB);
+ }
+};
+
+/**
+ * No plan produces any results or hits EOF. In this case we should prefer covered solutions to
+ * non-covered solutions.
+ */
+class PlanRankingPreferCoveredEvenIfNoResults : public PlanRankingTestBase {
+public:
+ void run() {
+ // We insert lots of copies of {a:1, b:1}. We have the indices {a:1} and {a:1, b:1},
+ // the query is for a doc that doesn't exist, but there is a projection over 'a' and
+ // 'b'. We should prefer the index that provides a covered query.
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 1 << "b" << 1));
}
- };
- /**
- * Same as PlanRankingPreferImmediateEOF, but substitute a range predicate on "a" for the
- * equality predicate on "a". The presence of the range predicate has an impact on the
- * intersection plan that is raced against the single-index plans: since "a" no longer generates
- * point interval bounds, the results of the index scan aren't guaranteed to be returned in
- * RecordId order, and so the intersection plan uses the AND_HASHED stage instead of the
- * AND_SORTED stage. It is still the case that the query should pick the plan that uses index
- * "b", instead of the plan that uses index "a" or the (hashed) intersection plan.
- */
- class PlanRankingPreferImmediateEOFAgainstHashed : public PlanRankingTestBase {
- public:
- void run() {
- // 'a' is very selective, 'b' is not.
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << i << "b" << 1));
- }
-
- // Add indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- // Run the query {a:N+1, b:1}. (No such document.)
- CanonicalQuery* cq;
- verify(CanonicalQuery::canonicalize(ns, BSON("a" << BSON("$gte" << N + 1)
- << "b" << 1), &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // {a: 100} is super selective so choose that.
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {filter: {b:1}, node: {ixscan: {pattern: {a: 1}}}}}",
- soln->root.get()));
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ // There is no data that matches this query ({a:2}). Both scans will hit EOF before
+ // returning any data.
+
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(
+ ns, BSON("a" << 2), BSONObj(), BSON("_id" << 0 << "a" << 1 << "b" << 1), &cq)
+ .isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+ // Prefer the fully covered plan.
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{proj: {spec: {_id:0, a:1, b:1}, node: {ixscan: {pattern: {a: 1, b:1}}}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * We have an index on "a" which is somewhat selective and an index on "b" which is highly
+ * selective (will cause an immediate EOF). Make sure that a query with predicates on both "a"
+ * and "b" will use the index on "b".
+ */
+class PlanRankingPreferImmediateEOF : public PlanRankingTestBase {
+public:
+ void run() {
+ // 'a' is very selective, 'b' is not.
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << i << "b" << 1));
}
- };
- /**
- * We have an index on _id and a query over _id with a sort. Ensure that we don't pick a
- * collscan as the best plan even though the _id-scanning solution doesn't produce any results.
- */
- class PlanRankingNoCollscan : public PlanRankingTestBase {
- public:
- void run() {
- for (int i = 0; i < N; ++i) {
- insert(BSON("_id" << i));
- }
-
- addIndex(BSON("_id" << 1));
-
- // Run a query with a sort. The blocking sort won't produce any data during the
- // evaluation period.
- CanonicalQuery* cq;
- BSONObj queryObj = BSON("_id" << BSON("$gte" << 20 << "$lte" << 200));
- BSONObj sortObj = BSON("c" << 1);
- BSONObj projObj = BSONObj();
- ASSERT(CanonicalQuery::canonicalize(ns,
- queryObj,
- sortObj,
- projObj,
- &cq).isOK());
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
-
- // The best must not be a collscan.
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{sort: {pattern: {c: 1}, limit: 0, node: {"
- "fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {_id: 1}}}}}}}}",
- soln->root.get()));
+ // Add indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // Run the query {a:N+1, b:1}. (No such document.)
+ CanonicalQuery* cq;
+ verify(CanonicalQuery::canonicalize(ns, BSON("a" << N + 1 << "b" << 1), &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // {a: N+1} matches nothing, so the scan of index {a: 1} hits EOF immediately; choose that.
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{fetch: {filter: {b:1}, node: {ixscan: {pattern: {a: 1}}}}}", soln->root.get()));
+ }
+};
+
+/**
+ * Same as PlanRankingPreferImmediateEOF, but substitute a range predicate on "a" for the
+ * equality predicate on "a". The presence of the range predicate has an impact on the
+ * intersection plan that is raced against the single-index plans: since "a" no longer generates
+ * point interval bounds, the results of the index scan aren't guaranteed to be returned in
+ * RecordId order, and so the intersection plan uses the AND_HASHED stage instead of the
+ * AND_SORTED stage. It is still the case that the query should pick the plan that uses index
+ * "b", instead of the plan that uses index "a" or the (hashed) intersection plan.
+ */
+class PlanRankingPreferImmediateEOFAgainstHashed : public PlanRankingTestBase {
+public:
+ void run() {
+ // 'a' is very selective, 'b' is not.
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << i << "b" << 1));
}
- };
- /**
- * No indices are available, output a collscan.
- */
- class PlanRankingCollscan : public PlanRankingTestBase {
- public:
- void run() {
- // Insert data for which we have no index.
- for (int i = 0; i < N; ++i) {
- insert(BSON("foo" << i));
- }
-
- // Look for A Space Odyssey.
- CanonicalQuery* cq;
- verify(CanonicalQuery::canonicalize(ns, BSON("foo" << 2001), &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
-
- // The best must be a collscan.
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{cscan: {dir: 1, filter: {foo: 2001}}}",
- soln->root.get()));
+ // Add indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // Run the query {a:{$gte:N+1}, b:1}. (No such document.)
+ CanonicalQuery* cq;
+ verify(CanonicalQuery::canonicalize(ns, BSON("a" << BSON("$gte" << N + 1) << "b" << 1), &cq)
+ .isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // {a: {$gte: N+1}} matches nothing, so the scan of index {a: 1} hits EOF immediately; choose that.
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{fetch: {filter: {b:1}, node: {ixscan: {pattern: {a: 1}}}}}", soln->root.get()));
+ }
+};
+
+/**
+ * We have an index on _id and a query over _id with a sort. Ensure that we don't pick a
+ * collscan as the best plan even though the _id-scanning solution doesn't produce any results.
+ */
+class PlanRankingNoCollscan : public PlanRankingTestBase {
+public:
+ void run() {
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("_id" << i));
}
- };
- /**
- * Index intersection solutions can be covered when single-index solutions
- * are not. If the single-index solutions need to do a lot of fetching,
- * then ixisect should win.
- */
- class PlanRankingIxisectCovered : public PlanRankingTestBase {
- public:
- void run() {
- // Neither 'a' nor 'b' is selective.
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1 << "b" << 1));
- }
-
- // Add indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- // Query {a:1, b:1}, and project out all fields other than 'a' and 'b'.
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- BSON("a" << 1 << "b" << 1),
- BSONObj(),
- BSON("_id" << 0 << "a" << 1 << "b" << 1),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // We should choose an ixisect plan because it requires fewer fetches.
- // Takes ownership of cq.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{proj: {spec: {_id:0,a:1,b:1}, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
- soln->root.get()));
+ addIndex(BSON("_id" << 1));
+
+ // Run a query with a sort. The blocking sort won't produce any data during the
+ // evaluation period.
+ CanonicalQuery* cq;
+ BSONObj queryObj = BSON("_id" << BSON("$gte" << 20 << "$lte" << 200));
+ BSONObj sortObj = BSON("c" << 1);
+ BSONObj projObj = BSONObj();
+ ASSERT(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cq).isOK());
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+
+ // The best must not be a collscan.
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{sort: {pattern: {c: 1}, limit: 0, node: {"
+ "fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {_id: 1}}}}}}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * No indices are available, output a collscan.
+ */
+class PlanRankingCollscan : public PlanRankingTestBase {
+public:
+ void run() {
+ // Insert data for which we have no index.
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("foo" << i));
}
- };
- /**
- * Use the same data, same indices, and same query as the previous
- * test case, except without the projection. The query is not covered
- * by the index in this case, which means that there is no advantage
- * to an index intersection solution.
- */
- class PlanRankingIxisectNonCovered : public PlanRankingTestBase {
- public:
- void run() {
- // Neither 'a' nor 'b' is selective.
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1 << "b" << 1));
- }
-
- // Add indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- // Query {a:1, b:1}.
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- BSON("a" << 1 << "b" << 1),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // The intersection is large, and ixisect does not make the
- // query covered. We should NOT choose an intersection plan.
- QuerySolution* soln = pickBestPlan(cq);
- bool bestIsScanOverA = QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}",
- soln->root.get());
- bool bestIsScanOverB = QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {ixscan: {pattern: {b: 1}}}}}",
- soln->root.get());
- ASSERT(bestIsScanOverA || bestIsScanOverB);
+ // Look for A Space Odyssey.
+ CanonicalQuery* cq;
+ verify(CanonicalQuery::canonicalize(ns, BSON("foo" << 2001), &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+
+ // The best must be a collscan.
+ ASSERT(QueryPlannerTestLib::solutionMatches("{cscan: {dir: 1, filter: {foo: 2001}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * Index intersection solutions can be covered when single-index solutions
+ * are not. If the single-index solutions need to do a lot of fetching,
+ * then ixisect should win.
+ */
+class PlanRankingIxisectCovered : public PlanRankingTestBase {
+public:
+ void run() {
+ // Neither 'a' nor 'b' is selective.
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 1 << "b" << 1));
}
- };
- /**
- * Index intersection solutions may require fewer fetches even if it does not make the
- * query covered. The ixisect plan will scan as many index keys as the union of the two
- * single index plans, but only needs to retrieve full documents for the intersection
- * of the two plans---this could mean fewer fetches!
- */
- class PlanRankingNonCoveredIxisectFetchesLess : public PlanRankingTestBase {
- public:
- void run() {
- // Set up data so that the following conditions hold:
- // 1) Documents matching {a: 1} are of high cardinality.
- // 2) Documents matching {b: 1} are of high cardinality.
- // 3) Documents matching {a: 1, b: 1} are of low cardinality---
- // the intersection is small.
- // 4) At least one of the documents in the intersection is
- // returned during the trial period.
+ // Add indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // Query {a:1, b:1}, and project out all fields other than 'a' and 'b'.
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(ns,
+ BSON("a" << 1 << "b" << 1),
+ BSONObj(),
+ BSON("_id" << 0 << "a" << 1 << "b" << 1),
+ &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // We should choose an ixisect plan because it requires fewer fetches.
+ // Takes ownership of cq.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{proj: {spec: {_id:0,a:1,b:1}, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * Use the same data, same indices, and same query as the previous
+ * test case, except without the projection. The query is not covered
+ * by the index in this case, which means that there is no advantage
+ * to an index intersection solution.
+ */
+class PlanRankingIxisectNonCovered : public PlanRankingTestBase {
+public:
+ void run() {
+ // Neither 'a' nor 'b' is selective.
+ for (int i = 0; i < N; ++i) {
insert(BSON("a" << 1 << "b" << 1));
- for (int i = 0; i < N/2; ++i) {
- insert(BSON("a" << 1 << "b" << 2));
- }
- for (int i = 0; i < N/2; ++i) {
- insert(BSON("a" << 2 << "b" << 1));
- }
-
- // Add indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- // Neither the predicate on 'b' nor the predicate on 'a' is
- // very selective: both retrieve about half the documents.
- // However, the intersection is very small, which makes
- // the intersection plan desirable.
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- fromjson("{a: 1, b: 1}"),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
- soln->root.get()));
}
- };
- /**
- * If the intersection is small, an AND_SORTED plan may be able to
- * hit EOF before the single index plans.
- */
- class PlanRankingIxisectHitsEOFFirst : public PlanRankingTestBase {
- public:
- void run() {
- // Set up the data so that for the query {a: 1, b: 1}, the
- // intersection is empty. The single index plans have to do
- // more fetching from disk in order to determine that the result
- // set is empty. As a result, the intersection plan hits EOF first.
- for (int i = 0; i < 30; ++i) {
- insert(BSON("a" << 1 << "b" << 2));
- }
- for (int i = 0; i < 30; ++i) {
- insert(BSON("a" << 2 << "b" << 1));
- }
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 2 << "b" << 2));
- }
-
- // Add indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- fromjson("{a: 1, b: 1}"),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Choose the index intersection plan.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
- soln->root.get()));
+ // Add indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // Query {a:1, b:1}.
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(ns, BSON("a" << 1 << "b" << 1), &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // The intersection is large, and ixisect does not make the
+ // query covered. We should NOT choose an intersection plan.
+ QuerySolution* soln = pickBestPlan(cq);
+ bool bestIsScanOverA = QueryPlannerTestLib::solutionMatches(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}", soln->root.get());
+ bool bestIsScanOverB = QueryPlannerTestLib::solutionMatches(
+ "{fetch: {node: {ixscan: {pattern: {b: 1}}}}}", soln->root.get());
+ ASSERT(bestIsScanOverA || bestIsScanOverB);
+ }
+};
+
+/**
+ * Index intersection solutions may require fewer fetches even if they do not make the
+ * query covered. The ixisect plan will scan as many index keys as the union of the two
+ * single index plans, but only needs to retrieve full documents for the intersection
+ * of the two plans---this could mean fewer fetches!
+ */
+class PlanRankingNonCoveredIxisectFetchesLess : public PlanRankingTestBase {
+public:
+ void run() {
+ // Set up data so that the following conditions hold:
+ // 1) Documents matching {a: 1} are of high cardinality.
+ // 2) Documents matching {b: 1} are of high cardinality.
+ // 3) Documents matching {a: 1, b: 1} are of low cardinality---
+ // the intersection is small.
+ // 4) At least one of the documents in the intersection is
+ // returned during the trial period.
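+ // The single document below is the only one matching {a: 1, b: 1}; it is the
+ // entire intersection.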
+ insert(BSON("a" << 1 << "b" << 1));
+ for (int i = 0; i < N / 2; ++i) {
+ insert(BSON("a" << 1 << "b" << 2));
+ }
+ for (int i = 0; i < N / 2; ++i) {
+ insert(BSON("a" << 2 << "b" << 1));
}
- };
- /**
- * If we query on 'a', 'b', and 'c' with indices on all three fields,
- * then there are three possible size-2 index intersections to consider.
- * Make sure we choose the right one.
- */
- class PlanRankingChooseBetweenIxisectPlans : public PlanRankingTestBase {
- public:
- void run() {
- // Set up the data so that for the query {a: 1, b: 1, c: 1}, the intersection
- // between 'b' and 'c' is small, and the other intersections are larger.
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << 1 << "b" << 1 << "c" << 1));
- }
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << 2 << "b" << 1 << "c" << 1));
- }
- for (int i = 0; i < N/2; ++i) {
- insert(BSON("a" << 1 << "b" << 1 << "c" << 2));
- insert(BSON("a" << 1 << "b" << 2 << "c" << 1));
- }
-
- // Add indices on 'a', 'b', and 'c'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
-
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- fromjson("{a: 1, b: 1, c: 1}"),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Intersection between 'b' and 'c' should hit EOF while the
- // other plans are busy fetching documents.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {b:1}}},"
- "{ixscan: {filter: null, pattern: {c:1}}}]}}}}",
- soln->root.get()));
+ // Add indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // Neither the predicate on 'b' nor the predicate on 'a' is
+ // very selective: both retrieve about half the documents.
+ // However, the intersection is very small, which makes
+ // the intersection plan desirable.
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(ns, fromjson("{a: 1, b: 1}"), &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{fetch: {node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * If the intersection is small, an AND_SORTED plan may be able to
+ * hit EOF before the single index plans.
+ */
+class PlanRankingIxisectHitsEOFFirst : public PlanRankingTestBase {
+public:
+ void run() {
+ // Set up the data so that for the query {a: 1, b: 1}, the
+ // intersection is empty. The single index plans have to do
+ // more fetching from disk in order to determine that the result
+ // set is empty. As a result, the intersection plan hits EOF first.
+ for (int i = 0; i < 30; ++i) {
+ insert(BSON("a" << 1 << "b" << 2));
+ }
+ for (int i = 0; i < 30; ++i) {
+ insert(BSON("a" << 2 << "b" << 1));
+ }
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 2 << "b" << 2));
}
- };
- /**
- * When no other information is available, prefer solutions without
- * a blocking sort stage.
- */
- class PlanRankingAvoidBlockingSort : public PlanRankingTestBase {
- public:
- void run() {
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1 << "d" << i));
- }
-
- // The index {d: 1, e: 1} provides the desired sort order,
- // while index {a: 1, b: 1} can be used to answer the
- // query predicate, but does not provide the sort.
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("d" << 1 << "e" << 1));
-
- // Query: find({a: 1}).sort({d: 1})
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- BSON("a" << 1),
- BSON("d" << 1), // sort
- BSONObj(), // projection
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // No results will be returned during the trial period,
- // so we expect to choose {d: 1, e: 1}, as it allows us
- // to avoid the sort stage.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {d:1,e:1}}}}}",
- soln->root.get()));
+ // Add indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(ns, fromjson("{a: 1, b: 1}"), &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Choose the index intersection plan.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{fetch: {node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * If we query on 'a', 'b', and 'c' with indices on all three fields,
+ * then there are three possible size-2 index intersections to consider.
+ * Make sure we choose the right one.
+ */
+class PlanRankingChooseBetweenIxisectPlans : public PlanRankingTestBase {
+public:
+ void run() {
+ // Set up the data so that for the query {a: 1, b: 1, c: 1}, the intersection
+ // between 'b' and 'c' is small, and the other intersections are larger.
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ }
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << 2 << "b" << 1 << "c" << 1));
+ }
+ for (int i = 0; i < N / 2; ++i) {
+ insert(BSON("a" << 1 << "b" << 1 << "c" << 2));
+ insert(BSON("a" << 1 << "b" << 2 << "c" << 1));
}
- };
- /**
- * Make sure we run candidate plans for long enough when none of the
- * plans are producing results.
- */
- class PlanRankingWorkPlansLongEnough : public PlanRankingTestBase {
- public:
- void run() {
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1));
- insert(BSON("a" << 1 << "b" << 1 << "c" << i));
- }
-
- // Indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- // Solutions using either 'a' or 'b' will take a long time to start producing
- // results. However, an index scan on 'b' will start producing results sooner
- // than an index scan on 'a'.
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- fromjson("{a: 1, b: 1, c: {$gte: 5000}}"),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Use index on 'b'.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {ixscan: {pattern: {b: 1}}}}}",
- soln->root.get()));
+ // Add indices on 'a', 'b', and 'c'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(ns, fromjson("{a: 1, b: 1, c: 1}"), &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Intersection between 'b' and 'c' should hit EOF while the
+ // other plans are busy fetching documents.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{fetch: {node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {b:1}}},"
+ "{ixscan: {filter: null, pattern: {c:1}}}]}}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * When no other information is available, prefer solutions without
+ * a blocking sort stage.
+ */
+class PlanRankingAvoidBlockingSort : public PlanRankingTestBase {
+public:
+ void run() {
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 1 << "d" << i));
}
- };
- /**
- * Suppose we have two plans which are roughly equivalent, other than that
- * one uses an index which involves doing a lot more skipping of index keys.
- * Prefer the plan which does not have to do this index key skipping.
- */
- class PlanRankingAccountForKeySkips : public PlanRankingTestBase {
- public:
- void run() {
- for (int i = 0; i < 100; ++i) {
- insert(BSON("a" << i << "b" << i << "c" << i));
- }
-
- // These indices look equivalent to the ranker for the query below unless we account
- // for key skipping. We should pick index {a: 1} if we account for key skipping
- // properly.
- addIndex(BSON("b" << 1 << "c" << 1));
- addIndex(BSON("a" << 1));
-
- CanonicalQuery* cq;
- ASSERT(CanonicalQuery::canonicalize(ns,
- fromjson("{a: 9, b: {$ne: 10}, c: 9}"),
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Expect to use index {a: 1, b: 1}.
- QuerySolution* soln = pickBestPlan(cq);
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}",
- soln->root.get()));
+ // The index {d: 1, e: 1} provides the desired sort order,
+ // while index {a: 1, b: 1} can be used to answer the
+ // query predicate, but does not provide the sort.
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("d" << 1 << "e" << 1));
+
+ // Query: find({a: 1}).sort({d: 1})
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(ns,
+ BSON("a" << 1),
+ BSON("d" << 1), // sort
+ BSONObj(), // projection
+ &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // No results will be returned during the trial period,
+ // so we expect to choose {d: 1, e: 1}, as it allows us
+ // to avoid the sort stage.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {d:1,e:1}}}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * Make sure we run candidate plans for long enough when none of the
+ * plans are producing results.
+ */
+class PlanRankingWorkPlansLongEnough : public PlanRankingTestBase {
+public:
+ void run() {
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 1));
+ insert(BSON("a" << 1 << "b" << 1 << "c" << i));
}
- };
-
- class All : public Suite {
- public:
- All() : Suite( "query_plan_ranking" ) {}
-
- void setupTests() {
- add<PlanRankingIntersectOverride>();
- add<PlanRankingIntersectWithBackup>();
- add<PlanRankingPreferCovered>();
- add<PlanRankingAvoidIntersectIfNoResults>();
- add<PlanRankingPreferCoveredEvenIfNoResults>();
- add<PlanRankingPreferImmediateEOF>();
- add<PlanRankingPreferImmediateEOFAgainstHashed>();
- add<PlanRankingNoCollscan>();
- add<PlanRankingCollscan>();
- // TODO: These don't work without counting FETCH and FETCH is now gone.
- // add<PlanRankingIxisectCovered>();
- // add<PlanRankingIxisectNonCovered>();
- // add<PlanRankingNonCoveredIxisectFetchesLess>();
- // add<PlanRankingIxisectHitsEOFFirst>();
- // add<PlanRankingChooseBetweenIxisectPlans>();
- add<PlanRankingAvoidBlockingSort>();
- add<PlanRankingWorkPlansLongEnough>();
- add<PlanRankingAccountForKeySkips>();
+
+ // Indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // Solutions using either 'a' or 'b' will take a long time to start producing
+ // results. However, an index scan on 'b' will start producing results sooner
+ // than an index scan on 'a'.
+ CanonicalQuery* cq;
+ ASSERT(CanonicalQuery::canonicalize(ns, fromjson("{a: 1, b: 1, c: {$gte: 5000}}"), &cq)
+ .isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Use index on 'b'.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches("{fetch: {node: {ixscan: {pattern: {b: 1}}}}}",
+ soln->root.get()));
+ }
+};
+
+/**
+ * Suppose we have two plans which are roughly equivalent, other than that
+ * one uses an index which involves doing a lot more skipping of index keys.
+ * Prefer the plan which does not have to do this index key skipping.
+ */
+class PlanRankingAccountForKeySkips : public PlanRankingTestBase {
+public:
+ void run() {
+ for (int i = 0; i < 100; ++i) {
+ insert(BSON("a" << i << "b" << i << "c" << i));
}
- };
- SuiteInstance<All> planRankingAll;
+ // These indices look equivalent to the ranker for the query below unless we account
+ // for key skipping. We should pick index {a: 1} if we account for key skipping
+ // properly.
+ addIndex(BSON("b" << 1 << "c" << 1));
+ addIndex(BSON("a" << 1));
+
+ CanonicalQuery* cq;
+ ASSERT(
+ CanonicalQuery::canonicalize(ns, fromjson("{a: 9, b: {$ne: 10}, c: 9}"), &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Expect to use index {a: 1}.
+ QuerySolution* soln = pickBestPlan(cq);
+ ASSERT(QueryPlannerTestLib::solutionMatches("{fetch: {node: {ixscan: {pattern: {a: 1}}}}}",
+ soln->root.get()));
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("query_plan_ranking") {}
+
+ void setupTests() {
+ add<PlanRankingIntersectOverride>();
+ add<PlanRankingIntersectWithBackup>();
+ add<PlanRankingPreferCovered>();
+ add<PlanRankingAvoidIntersectIfNoResults>();
+ add<PlanRankingPreferCoveredEvenIfNoResults>();
+ add<PlanRankingPreferImmediateEOF>();
+ add<PlanRankingPreferImmediateEOFAgainstHashed>();
+ add<PlanRankingNoCollscan>();
+ add<PlanRankingCollscan>();
+ // TODO: These don't work without counting FETCH and FETCH is now gone.
+ // add<PlanRankingIxisectCovered>();
+ // add<PlanRankingIxisectNonCovered>();
+ // add<PlanRankingNonCoveredIxisectFetchesLess>();
+ // add<PlanRankingIxisectHitsEOFFirst>();
+ // add<PlanRankingChooseBetweenIxisectPlans>();
+ add<PlanRankingAvoidBlockingSort>();
+ add<PlanRankingWorkPlansLongEnough>();
+ add<PlanRankingAccountForKeySkips>();
+ }
+};
+
+SuiteInstance<All> planRankingAll;
} // namespace PlanRankingTest
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index 2b9d88bad83..f38efd42e45 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -49,248 +49,251 @@
namespace mongo {
- // How we access the external setParameter testing bool.
- extern bool internalQueryForceIntersectionPlans;
+// How we access the external setParameter testing bool.
+extern bool internalQueryForceIntersectionPlans;
} // namespace mongo
namespace QueryMultiPlanRunner {
- using boost::scoped_ptr;
- using std::auto_ptr;
- using std::vector;
-
- /**
- * Create query solution.
- */
- QuerySolution* createQuerySolution() {
- std::auto_ptr<QuerySolution> soln(new QuerySolution());
- soln->cacheData.reset(new SolutionCacheData());
- soln->cacheData->solnType = SolutionCacheData::COLLSCAN_SOLN;
- soln->cacheData->tree.reset(new PlanCacheIndexTree());
- return soln.release();
+using boost::scoped_ptr;
+using std::auto_ptr;
+using std::vector;
+
+/**
+ * Create query solution.
+ */
+QuerySolution* createQuerySolution() {
+ std::auto_ptr<QuerySolution> soln(new QuerySolution());
+ soln->cacheData.reset(new SolutionCacheData());
+ soln->cacheData->solnType = SolutionCacheData::COLLSCAN_SOLN;
+ soln->cacheData->tree.reset(new PlanCacheIndexTree());
+ return soln.release();
+}
+
+class MultiPlanRunnerBase {
+public:
+ MultiPlanRunnerBase() : _client(&_txn) {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
}
- class MultiPlanRunnerBase {
- public:
- MultiPlanRunnerBase() : _client(&_txn) {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
- }
+ virtual ~MultiPlanRunnerBase() {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
+ }
- virtual ~MultiPlanRunnerBase() {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
- }
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
+
+ void insert(const BSONObj& obj) {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.insert(ns(), obj);
+ }
+
+ void remove(const BSONObj& obj) {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.remove(ns(), obj);
+ }
+
+ static const char* ns() {
+ return "unittests.QueryStageMultiPlanRunner";
+ }
+
+protected:
+ OperationContextImpl _txn;
+ DBDirectClient _client;
+};
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
- }
- void insert(const BSONObj& obj) {
- Client::WriteContext ctx(&_txn, ns());
- _client.insert(ns(), obj);
+// Basic ranking test: collection scan vs. highly selective index scan. Make sure we also get
+// all expected results out.
+class MPRCollectionScanVsHighlySelectiveIXScan : public MultiPlanRunnerBase {
+public:
+ void run() {
+ const int N = 5000;
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("foo" << (i % 10)));
}
- void remove(const BSONObj& obj) {
- Client::WriteContext ctx(&_txn, ns());
- _client.remove(ns(), obj);
+ addIndex(BSON("foo" << 1));
+
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ const Collection* coll = ctx.getCollection();
+
+ // Plan 0: IXScan over foo == 7
+ // Every call to work() returns something so this should clearly win (by current scoring
+ // at least).
+ IndexScanParams ixparams;
+ ixparams.descriptor =
+ coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, BSON("foo" << 1));
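+ // The bounds below form the point interval [7, 7] (both ends inclusive), so the
+ // scan touches only keys that match the predicate.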
+ ixparams.bounds.isSimpleRange = true;
+ ixparams.bounds.startKey = BSON("" << 7);
+ ixparams.bounds.endKey = BSON("" << 7);
+ ixparams.bounds.endKeyInclusive = true;
+ ixparams.direction = 1;
+
+ auto_ptr<WorkingSet> sharedWs(new WorkingSet());
+ IndexScan* ix = new IndexScan(&_txn, ixparams, sharedWs.get(), NULL);
+ auto_ptr<PlanStage> firstRoot(new FetchStage(&_txn, sharedWs.get(), ix, NULL, coll));
+
+ // Plan 1: CollScan with matcher.
+ CollectionScanParams csparams;
+ csparams.collection = coll;
+ csparams.direction = CollectionScanParams::FORWARD;
+
+ // Make the filter.
+ BSONObj filterObj = BSON("foo" << 7);
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
+ verify(swme.isOK());
+ auto_ptr<MatchExpression> filter(swme.getValue());
+ // Make the stage.
+ auto_ptr<PlanStage> secondRoot(
+ new CollectionScan(&_txn, csparams, sharedWs.get(), filter.get()));
+
+ // Hand the plans off to the runner.
+ CanonicalQuery* cq = NULL;
+ verify(CanonicalQuery::canonicalize(ns(), BSON("foo" << 7), &cq).isOK());
+ verify(NULL != cq);
+
+ MultiPlanStage* mps = new MultiPlanStage(&_txn, ctx.getCollection(), cq);
+ mps->addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get());
+ mps->addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
+
+ // Plan 0 (the first plan, the index scan) should be the best. NULL means that
+ // 'mps' will not yield during plan selection.
+ mps->pickBestPlan(NULL);
+ ASSERT(mps->bestPlanChosen());
+ ASSERT_EQUALS(0, mps->bestPlanIdx());
+
+ // Takes ownership of arguments other than 'collection'.
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(
+ &_txn, sharedWs.release(), mps, cq, coll, PlanExecutor::YIELD_MANUAL, &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ // Get all our results out.
+ int results = 0;
+ BSONObj obj;
+ while (PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
+ ASSERT_EQUALS(obj["foo"].numberInt(), 7);
+ ++results;
}
- static const char* ns() { return "unittests.QueryStageMultiPlanRunner"; }
-
- protected:
- OperationContextImpl _txn;
- DBDirectClient _client;
- };
-
-
- // Basic ranking test: collection scan vs. highly selective index scan. Make sure we also get
- // all expected results out as well.
- class MPRCollectionScanVsHighlySelectiveIXScan : public MultiPlanRunnerBase {
- public:
- void run() {
- const int N = 5000;
- for (int i = 0; i < N; ++i) {
- insert(BSON("foo" << (i % 10)));
- }
-
- addIndex(BSON("foo" << 1));
-
- AutoGetCollectionForRead ctx(&_txn, ns());
- const Collection* coll = ctx.getCollection();
-
- // Plan 0: IXScan over foo == 7
- // Every call to work() returns something so this should clearly win (by current scoring
- // at least).
- IndexScanParams ixparams;
- ixparams.descriptor = coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, BSON("foo" << 1));
- ixparams.bounds.isSimpleRange = true;
- ixparams.bounds.startKey = BSON("" << 7);
- ixparams.bounds.endKey = BSON("" << 7);
- ixparams.bounds.endKeyInclusive = true;
- ixparams.direction = 1;
-
- auto_ptr<WorkingSet> sharedWs(new WorkingSet());
- IndexScan* ix = new IndexScan(&_txn, ixparams, sharedWs.get(), NULL);
- auto_ptr<PlanStage> firstRoot(new FetchStage(&_txn, sharedWs.get(), ix, NULL, coll));
-
- // Plan 1: CollScan with matcher.
- CollectionScanParams csparams;
- csparams.collection = coll;
- csparams.direction = CollectionScanParams::FORWARD;
-
- // Make the filter.
- BSONObj filterObj = BSON("foo" << 7);
- StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
- verify(swme.isOK());
- auto_ptr<MatchExpression> filter(swme.getValue());
- // Make the stage.
- auto_ptr<PlanStage> secondRoot(new CollectionScan(&_txn, csparams, sharedWs.get(),
- filter.get()));
-
- // Hand the plans off to the runner.
- CanonicalQuery* cq = NULL;
- verify(CanonicalQuery::canonicalize(ns(), BSON("foo" << 7), &cq).isOK());
- verify(NULL != cq);
-
- MultiPlanStage* mps = new MultiPlanStage(&_txn, ctx.getCollection(), cq);
- mps->addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get());
- mps->addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
-
- // Plan 0 aka the first plan aka the index scan should be the best. NULL means that
- // 'mps' will not yield during plan selection.
- mps->pickBestPlan(NULL);
- ASSERT(mps->bestPlanChosen());
- ASSERT_EQUALS(0, mps->bestPlanIdx());
-
- // Takes ownership of arguments other than 'collection'.
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, sharedWs.release(), mps, cq, coll,
- PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- // Get all our results out.
- int results = 0;
- BSONObj obj;
- while (PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
- ASSERT_EQUALS(obj["foo"].numberInt(), 7);
- ++results;
- }
-
- ASSERT_EQUALS(results, N / 10);
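+ // Docs were inserted with foo == i % 10, so exactly N / 10 of them match foo == 7.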
+ ASSERT_EQUALS(results, N / 10);
+ }
+};
+
+// Case in which we select a blocking plan as the winner, and a non-blocking plan
+// is available as a backup.
+class MPRBackupPlan : public MultiPlanRunnerBase {
+public:
+ void run() {
+ // Data is just a single {_id: 1, a: 1, b: 1} document.
+ insert(BSON("_id" << 1 << "a" << 1 << "b" << 1));
+
+ // Indices on 'a' and 'b'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
+
+ // Query for both 'a' and 'b' and sort on 'b'.
+ CanonicalQuery* cq;
+ verify(CanonicalQuery::canonicalize(ns(),
+ BSON("a" << 1 << "b" << 1), // query
+ BSON("b" << 1), // sort
+ BSONObj(), // proj
+ &cq).isOK());
+ ASSERT(NULL != cq);
+ boost::scoped_ptr<CanonicalQuery> killCq(cq);
+
+ // Force index intersection.
+ bool forceIxisectOldValue = internalQueryForceIntersectionPlans;
+ internalQueryForceIntersectionPlans = true;
+
+ // Get planner params.
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_txn, collection, cq, &plannerParams);
+ // Turn this off; otherwise it pops up in some plans.
+ plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;
+
+ // Plan.
+ vector<QuerySolution*> solutions;
+ Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
+ ASSERT(status.isOK());
+
+ // We expect a plan using index {a: 1}, a plan using index {b: 1}, and
+ // an index intersection plan.
+ ASSERT_EQUALS(solutions.size(), 3U);
+
+ // Fill out the MultiPlanStage.
+ scoped_ptr<MultiPlanStage> mps(new MultiPlanStage(&_txn, collection, cq));
+ scoped_ptr<WorkingSet> ws(new WorkingSet());
+ // Put each solution from the planner into the MPR.
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ PlanStage* root;
+ ASSERT(StageBuilder::build(&_txn, collection, *solutions[i], ws.get(), &root));
+ // Takes ownership of 'solutions[i]' and 'root'.
+ mps->addPlan(solutions[i], root, ws.get());
}
- };
-
- // Case in which we select a blocking plan as the winner, and a non-blocking plan
- // is available as a backup.
- class MPRBackupPlan : public MultiPlanRunnerBase {
- public:
- void run() {
- // Data is just a single {_id: 1, a: 1, b: 1} document.
- insert(BSON("_id" << 1 << "a" << 1 << "b" << 1));
-
- // Indices on 'a' and 'b'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- AutoGetCollectionForRead ctx(&_txn, ns());
- Collection* collection = ctx.getCollection();
-
- // Query for both 'a' and 'b' and sort on 'b'.
- CanonicalQuery* cq;
- verify(CanonicalQuery::canonicalize(ns(),
- BSON("a" << 1 << "b" << 1), // query
- BSON("b" << 1), // sort
- BSONObj(), // proj
- &cq).isOK());
- ASSERT(NULL != cq);
- boost::scoped_ptr<CanonicalQuery> killCq(cq);
-
- // Force index intersection.
- bool forceIxisectOldValue = internalQueryForceIntersectionPlans;
- internalQueryForceIntersectionPlans = true;
-
- // Get planner params.
- QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq, &plannerParams);
- // Turn this off otherwise it pops up in some plans.
- plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;
-
- // Plan.
- vector<QuerySolution*> solutions;
- Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
- ASSERT(status.isOK());
-
- // We expect a plan using index {a: 1} and plan using index {b: 1} and
- // an index intersection plan.
- ASSERT_EQUALS(solutions.size(), 3U);
-
- // Fill out the MultiPlanStage.
- scoped_ptr<MultiPlanStage> mps(new MultiPlanStage(&_txn, collection, cq));
- scoped_ptr<WorkingSet> ws(new WorkingSet());
- // Put each solution from the planner into the MPR.
- for (size_t i = 0; i < solutions.size(); ++i) {
- PlanStage* root;
- ASSERT(StageBuilder::build(&_txn, collection, *solutions[i], ws.get(), &root));
- // Takes ownership of 'solutions[i]' and 'root'.
- mps->addPlan(solutions[i], root, ws.get());
- }
-
- // This sets a backup plan. NULL means that 'mps' will not yield.
- mps->pickBestPlan(NULL);
- ASSERT(mps->bestPlanChosen());
- ASSERT(mps->hasBackupPlan());
-
- // We should have picked the index intersection plan due to forcing ixisect.
- QuerySolution* soln = mps->bestSolution();
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{sort: {pattern: {b: 1}, limit: 0, node: "
- "{fetch: {node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}}}",
- soln->root.get()));
-
- // Get the resulting document.
- PlanStage::StageState state = PlanStage::NEED_TIME;
- WorkingSetID wsid;
- while (state != PlanStage::ADVANCED) {
- state = mps->work(&wsid);
- }
- WorkingSetMember* member = ws->get(wsid);
-
- // Check the document returned by the query.
- ASSERT(member->hasObj());
- BSONObj expectedDoc = BSON("_id" << 1 << "a" << 1 << "b" << 1);
- ASSERT(expectedDoc.woCompare(member->obj.value()) == 0);
-
- // The blocking plan became unblocked, so we should no longer have a backup plan,
- // and the winning plan should still be the index intersection one.
- ASSERT(!mps->hasBackupPlan());
- soln = mps->bestSolution();
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{sort: {pattern: {b: 1}, limit: 0, node: "
- "{fetch: {node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}}}",
- soln->root.get()));
-
- // Restore index intersection force parameter.
- internalQueryForceIntersectionPlans = forceIxisectOldValue;
+
+ // This sets a backup plan. NULL means that 'mps' will not yield.
+ mps->pickBestPlan(NULL);
+ ASSERT(mps->bestPlanChosen());
+ ASSERT(mps->hasBackupPlan());
+
+ // We should have picked the index intersection plan due to forcing ixisect.
+ QuerySolution* soln = mps->bestSolution();
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{fetch: {node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}}}",
+ soln->root.get()));
+
+ // Get the resulting document.
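+ // The winning plan has a blocking sort, which returns NEED_TIME while it buffers
+ // its child's output; loop until the stage yields an ADVANCED result.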
+ PlanStage::StageState state = PlanStage::NEED_TIME;
+ WorkingSetID wsid;
+ while (state != PlanStage::ADVANCED) {
+ state = mps->work(&wsid);
}
- };
+ WorkingSetMember* member = ws->get(wsid);
+
+ // Check the document returned by the query.
+ ASSERT(member->hasObj());
+ BSONObj expectedDoc = BSON("_id" << 1 << "a" << 1 << "b" << 1);
+ ASSERT(expectedDoc.woCompare(member->obj.value()) == 0);
+
+ // The blocking plan became unblocked, so we should no longer have a backup plan,
+ // and the winning plan should still be the index intersection one.
+ ASSERT(!mps->hasBackupPlan());
+ soln = mps->bestSolution();
+ ASSERT(QueryPlannerTestLib::solutionMatches(
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{fetch: {node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}}}",
+ soln->root.get()));
+
+ // Restore index intersection force parameter.
+ internalQueryForceIntersectionPlans = forceIxisectOldValue;
+ }
+};
- class All : public Suite {
- public:
- All() : Suite( "query_multi_plan_runner" ) { }
+class All : public Suite {
+public:
+ All() : Suite("query_multi_plan_runner") {}
- void setupTests() {
- add<MPRCollectionScanVsHighlySelectiveIXScan>();
- add<MPRBackupPlan>();
- }
- };
+ void setupTests() {
+ add<MPRCollectionScanVsHighlySelectiveIXScan>();
+ add<MPRBackupPlan>();
+ }
+};
- SuiteInstance<All> queryMultiPlanRunnerAll;
+SuiteInstance<All> queryMultiPlanRunnerAll;
} // namespace QueryMultiPlanRunner
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index f524c421df2..5507f4a88ef 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -49,479 +49,477 @@
namespace QueryPlanExecutor {
- using boost::scoped_ptr;
- using boost::shared_ptr;
- using std::auto_ptr;
- using std::string;
+using boost::scoped_ptr;
+using boost::shared_ptr;
+using std::auto_ptr;
+using std::string;
- class PlanExecutorBase {
- public:
- PlanExecutorBase() : _client(&_txn) {
+class PlanExecutorBase {
+public:
+ PlanExecutorBase() : _client(&_txn) {}
- }
+ virtual ~PlanExecutorBase() {
+ _client.dropCollection(ns());
+ }
- virtual ~PlanExecutorBase() {
- _client.dropCollection(ns());
- }
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
- }
+ void insert(const BSONObj& obj) {
+ _client.insert(ns(), obj);
+ }
- void insert(const BSONObj& obj) {
- _client.insert(ns(), obj);
- }
+ void remove(const BSONObj& obj) {
+ _client.remove(ns(), obj);
+ }
- void remove(const BSONObj& obj) {
- _client.remove(ns(), obj);
- }
+ void dropCollection() {
+ _client.dropCollection(ns());
+ }
- void dropCollection() {
- _client.dropCollection(ns());
- }
+ void update(BSONObj& query, BSONObj& updateSpec) {
+ _client.update(ns(), query, updateSpec, false, false);
+ }
- void update(BSONObj& query, BSONObj& updateSpec) {
- _client.update(ns(), query, updateSpec, false, false);
- }
+ /**
+ * Given a match expression, represented as the BSON object 'filterObj',
+ * create a PlanExecutor capable of executing a simple collection
+ * scan.
+ *
+ * The caller takes ownership of the returned PlanExecutor*.
+ */
+ PlanExecutor* makeCollScanExec(Collection* coll, BSONObj& filterObj) {
+ CollectionScanParams csparams;
+ csparams.collection = coll;
+ csparams.direction = CollectionScanParams::FORWARD;
+ auto_ptr<WorkingSet> ws(new WorkingSet());
+
+ // Canonicalize the query
+ CanonicalQuery* cq;
+ verify(CanonicalQuery::canonicalize(ns(), filterObj, &cq).isOK());
+ verify(NULL != cq);
+
+ // Make the stage.
+ auto_ptr<PlanStage> root(new CollectionScan(&_txn, csparams, ws.get(), cq->root()));
+
+ PlanExecutor* exec;
+ // Hand the plan off to the executor.
+ Status stat = PlanExecutor::make(
+ &_txn, ws.release(), root.release(), cq, coll, PlanExecutor::YIELD_MANUAL, &exec);
+ ASSERT_OK(stat);
+ return exec;
+ }
- /**
- * Given a match expression, represented as the BSON object 'filterObj',
- * create a PlanExecutor capable of executing a simple collection
- * scan.
- *
- * The caller takes ownership of the returned PlanExecutor*.
- */
- PlanExecutor* makeCollScanExec(Collection* coll, BSONObj& filterObj) {
- CollectionScanParams csparams;
- csparams.collection = coll;
- csparams.direction = CollectionScanParams::FORWARD;
- auto_ptr<WorkingSet> ws(new WorkingSet());
-
- // Canonicalize the query
- CanonicalQuery* cq;
- verify(CanonicalQuery::canonicalize(ns(), filterObj, &cq).isOK());
- verify(NULL != cq);
-
- // Make the stage.
- auto_ptr<PlanStage> root(new CollectionScan(&_txn, csparams, ws.get(), cq->root()));
-
- PlanExecutor* exec;
- // Hand the plan off to the executor.
- Status stat = PlanExecutor::make(&_txn, ws.release(), root.release(), cq, coll,
- PlanExecutor::YIELD_MANUAL, &exec);
- ASSERT_OK(stat);
- return exec;
- }
+ /**
+ * @param indexSpec -- a BSONObj giving the index over which to
+ * scan, e.g. {_id: 1}.
+ * @param start -- the lower bound (inclusive) at which to start
+ * the index scan
+ * @param end -- the upper bound (inclusive) at which to end the
+ * index scan
+ *
+ * Returns a PlanExecutor capable of executing an index scan
+ * over the specified index with the specified bounds.
+ *
+ * The caller takes ownership of the returned PlanExecutor*.
+ */
+ PlanExecutor* makeIndexScanExec(Client::Context& context,
+ BSONObj& indexSpec,
+ int start,
+ int end) {
+ // Build the index scan stage.
+ IndexScanParams ixparams;
+ ixparams.descriptor = getIndex(context.db(), indexSpec);
+ ixparams.bounds.isSimpleRange = true;
+ ixparams.bounds.startKey = BSON("" << start);
+ ixparams.bounds.endKey = BSON("" << end);
+ ixparams.bounds.endKeyInclusive = true;
+ ixparams.direction = 1;
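+ // Both ends of the simple range are inclusive, so the scan returns keys in [start, end].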
+
+ const Collection* coll = context.db()->getCollection(ns());
+
+ auto_ptr<WorkingSet> ws(new WorkingSet());
+ IndexScan* ix = new IndexScan(&_txn, ixparams, ws.get(), NULL);
+ auto_ptr<PlanStage> root(new FetchStage(&_txn, ws.get(), ix, NULL, coll));
+
+ CanonicalQuery* cq;
+ verify(CanonicalQuery::canonicalize(ns(), BSONObj(), &cq).isOK());
+ verify(NULL != cq);
+
+ PlanExecutor* exec;
+ // Hand the plan off to the executor.
+ Status stat = PlanExecutor::make(
+ &_txn, ws.release(), root.release(), cq, coll, PlanExecutor::YIELD_MANUAL, &exec);
+ ASSERT_OK(stat);
+ return exec;
+ }
+
+ static const char* ns() {
+ return "unittests.QueryPlanExecutor";
+ }
+
+ size_t numCursors() {
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
+ if (!collection)
+ return 0;
+ return collection->getCursorManager()->numCursors();
+ }
+
+ void registerExec(PlanExecutor* exec) {
+ // TODO: This is not correct (create collection under S-lock)
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ WriteUnitOfWork wunit(&_txn);
+ Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, ns());
+ collection->getCursorManager()->registerExecutor(exec);
+ wunit.commit();
+ }
+
+ void deregisterExec(PlanExecutor* exec) {
+ // TODO: This is not correct (create collection under S-lock)
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ WriteUnitOfWork wunit(&_txn);
+ Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, ns());
+ collection->getCursorManager()->deregisterExecutor(exec);
+ wunit.commit();
+ }
+
+protected:
+ OperationContextImpl _txn;
+
+private:
+ IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
+ Collection* collection = db->getCollection(ns());
+ return collection->getIndexCatalog()->findIndexByKeyPattern(&_txn, obj);
+ }
+
+ DBDirectClient _client;
+};
- /**
- * @param indexSpec -- a BSONObj giving the index over which to
- * scan, e.g. {_id: 1}.
- * @param start -- the lower bound (inclusive) at which to start
- * the index scan
- * @param end -- the lower bound (inclusive) at which to end the
- * index scan
- *
- * Returns a PlanExecutor capable of executing an index scan
- * over the specified index with the specified bounds.
- *
- * The caller takes ownership of the returned PlanExecutor*.
- */
- PlanExecutor* makeIndexScanExec(Client::Context& context,
- BSONObj& indexSpec, int start, int end) {
- // Build the index scan stage.
- IndexScanParams ixparams;
- ixparams.descriptor = getIndex(context.db(), indexSpec);
- ixparams.bounds.isSimpleRange = true;
- ixparams.bounds.startKey = BSON("" << start);
- ixparams.bounds.endKey = BSON("" << end);
- ixparams.bounds.endKeyInclusive = true;
- ixparams.direction = 1;
-
- const Collection* coll = context.db()->getCollection(ns());
-
- auto_ptr<WorkingSet> ws(new WorkingSet());
- IndexScan* ix = new IndexScan(&_txn, ixparams, ws.get(), NULL);
- auto_ptr<PlanStage> root(new FetchStage(&_txn, ws.get(), ix, NULL, coll));
-
- CanonicalQuery* cq;
- verify(CanonicalQuery::canonicalize(ns(), BSONObj(), &cq).isOK());
- verify(NULL != cq);
-
- PlanExecutor* exec;
- // Hand the plan off to the executor.
- Status stat = PlanExecutor::make(&_txn, ws.release(), root.release(), cq, coll,
- PlanExecutor::YIELD_MANUAL, &exec);
- ASSERT_OK(stat);
- return exec;
- }
+/**
+ * Test dropping the collection while the
+ * PlanExecutor is doing a collection scan.
+ */
+class DropCollScan : public PlanExecutorBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ insert(BSON("_id" << 1));
+ insert(BSON("_id" << 2));
- static const char* ns() { return "unittests.QueryPlanExecutor"; }
+ BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
- size_t numCursors() {
- AutoGetCollectionForRead ctx(&_txn, ns() );
- Collection* collection = ctx.getCollection();
- if ( !collection )
- return 0;
- return collection->getCursorManager()->numCursors();
- }
+ Collection* coll = ctx.getCollection();
+ scoped_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
+ registerExec(exec.get());
- void registerExec( PlanExecutor* exec ) {
- // TODO: This is not correct (create collection under S-lock)
- AutoGetCollectionForRead ctx(&_txn, ns());
- WriteUnitOfWork wunit(&_txn);
- Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, ns());
- collection->getCursorManager()->registerExecutor( exec );
- wunit.commit();
- }
+ BSONObj objOut;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(1, objOut["_id"].numberInt());
- void deregisterExec( PlanExecutor* exec ) {
- // TODO: This is not correct (create collection under S-lock)
- AutoGetCollectionForRead ctx(&_txn, ns());
- WriteUnitOfWork wunit(&_txn);
- Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, ns());
- collection->getCursorManager()->deregisterExecutor( exec );
- wunit.commit();
- }
+ // After dropping the collection, the runner
+ // should be dead.
+ dropCollection();
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
- protected:
- OperationContextImpl _txn;
+ deregisterExec(exec.get());
+ }
+};
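
The contract this test pins down is that every executor registered with the collection's cursor manager is killed when the collection is dropped, after which getNext() must report DEAD rather than touch freed storage. As a rough standalone model of that contract (ToyExecutor and ToyCursorManager are invented for illustration, not the server's types):

#include <cassert>
#include <set>

// Executors answer getNext() until something kills them.
struct ToyExecutor {
    bool dead = false;
    bool getNext() { return !dead; }  // false once killed, like PlanExecutor::DEAD
};

// The registry a collection drop walks to notify live executors.
struct ToyCursorManager {
    std::set<ToyExecutor*> registered;
    void registerExec(ToyExecutor* e) { registered.insert(e); }
    void deregisterExec(ToyExecutor* e) { registered.erase(e); }
    void killAll() {  // what dropCollection() triggers
        for (ToyExecutor* e : registered)
            e->dead = true;
    }
};

int main() {
    ToyCursorManager cm;
    ToyExecutor exec;
    cm.registerExec(&exec);
    assert(exec.getNext());   // alive before the drop
    cm.killAll();             // the drop notifies the manager
    assert(!exec.getNext());  // dead afterwards; nothing dereferences freed data
    cm.deregisterExec(&exec);
    return 0;
}
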
- private:
- IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
- Collection* collection = db->getCollection( ns() );
- return collection->getIndexCatalog()->findIndexByKeyPattern(&_txn, obj);
- }
+/**
+ * Test dropping the collection while the PlanExecutor is doing an index scan.
+ */
+class DropIndexScan : public PlanExecutorBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ insert(BSON("_id" << 1 << "a" << 6));
+ insert(BSON("_id" << 2 << "a" << 7));
+ insert(BSON("_id" << 3 << "a" << 8));
+ BSONObj indexSpec = BSON("a" << 1);
+ addIndex(indexSpec);
+
+ scoped_ptr<PlanExecutor> exec(makeIndexScanExec(ctx.ctx(), indexSpec, 7, 10));
+ registerExec(exec.get());
+
+ BSONObj objOut;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(7, objOut["a"].numberInt());
+
+ // After dropping the collection, the runner
+ // should be dead.
+ dropCollection();
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
+
+ deregisterExec(exec.get());
+ }
+};
- DBDirectClient _client;
- };
+/**
+ * Test dropping the collection while an agg PlanExecutor is doing an index scan.
+ */
+class DropIndexScanAgg : public PlanExecutorBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ insert(BSON("_id" << 1 << "a" << 6));
+ insert(BSON("_id" << 2 << "a" << 7));
+ insert(BSON("_id" << 3 << "a" << 8));
+ BSONObj indexSpec = BSON("a" << 1);
+ addIndex(indexSpec);
+
+ // Create the PlanExecutor which feeds the aggregation pipeline.
+ boost::shared_ptr<PlanExecutor> innerExec(makeIndexScanExec(ctx.ctx(), indexSpec, 7, 10));
+
+ // Create the aggregation pipeline.
+ boost::intrusive_ptr<ExpressionContext> expCtx =
+ new ExpressionContext(&_txn, NamespaceString(ns()));
+
+ string errmsg;
+ BSONObj inputBson = fromjson("{$match: {a: {$gte: 7, $lte: 10}}}");
+ boost::intrusive_ptr<Pipeline> pipeline = Pipeline::parseCommand(errmsg, inputBson, expCtx);
+ ASSERT_EQUALS(errmsg, "");
+
+ // Create the output PlanExecutor that pulls results from the pipeline.
+ std::auto_ptr<WorkingSet> ws(new WorkingSet());
+ std::auto_ptr<PipelineProxyStage> proxy(
+ new PipelineProxyStage(pipeline, innerExec, ws.get()));
+ Collection* collection = ctx.getCollection();
+
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(
+ &_txn, ws.release(), proxy.release(), collection, PlanExecutor::YIELD_MANUAL, &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> outerExec(rawExec);
+
+ // Only the outer executor gets registered.
+ registerExec(outerExec.get());
+
+ // Verify that both the "inner" and "outer" plan executors have been killed after
+ // dropping the collection.
+ BSONObj objOut;
+ dropCollection();
+ ASSERT_EQUALS(PlanExecutor::DEAD, innerExec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, outerExec->getNext(&objOut, NULL));
+
+ deregisterExec(outerExec.get());
+ }
+};
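
The interesting wrinkle above is that only the outer executor is registered, yet the inner one must die too, so the kill has to propagate through the pipeline proxy to the executor feeding it. A minimal sketch of that ownership chain, with invented names:

#include <cassert>
#include <memory>

struct InnerExec {
    bool dead = false;
};

// The outer executor owns the pipeline, which holds the inner executor.
// Killing the registered (outer) one must also kill what feeds it.
struct OuterExec {
    std::shared_ptr<InnerExec> source;
    bool dead = false;
    explicit OuterExec(std::shared_ptr<InnerExec> s) : source(std::move(s)) {}
    void kill() {
        dead = true;
        source->dead = true;  // propagate, as the pipeline proxy must
    }
};

int main() {
    auto inner = std::make_shared<InnerExec>();
    OuterExec outer(inner);  // only 'outer' would be registered
    outer.kill();            // the collection drop kills the registered executor
    assert(inner->dead && outer.dead);  // both report DEAD, as the test asserts
    return 0;
}
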
+
+class SnapshotBase : public PlanExecutorBase {
+protected:
+ void setupCollection() {
+ insert(BSON("_id" << 1 << "a" << 1));
+ insert(BSON("_id" << 2 << "a" << 2 << "payload"
+ << "x"));
+ insert(BSON("_id" << 3 << "a" << 3));
+ insert(BSON("_id" << 4 << "a" << 4));
+ }
/**
- * Test dropping the collection while the
- * PlanExecutor is doing a collection scan.
+ * Increases a document's size dramatically such that the document
+ * exceeds the available padding and must be moved to the end of
+ * the collection.
*/
- class DropCollScan : public PlanExecutorBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- insert(BSON("_id" << 1));
- insert(BSON("_id" << 2));
-
- BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
-
- Collection* coll = ctx.getCollection();
- scoped_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
- registerExec(exec.get());
+ void forceDocumentMove() {
+ BSONObj query = BSON("_id" << 2);
+ BSONObj updateSpec = BSON("$set" << BSON("payload" << payload8k()));
+ update(query, updateSpec);
+ }
- BSONObj objOut;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
- ASSERT_EQUALS(1, objOut["_id"].numberInt());
-
- // After dropping the collection, the runner
- // should be dead.
- dropCollection();
- ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
-
- deregisterExec(exec.get());
- }
- };
+ std::string payload8k() {
+ return std::string(8 * 1024, 'x');
+ }
/**
- * Test dropping the collection while the PlanExecutor is doing an index scan.
+ * Given an array of ints, 'expectedIds', and a PlanExecutor,
+ * 'exec', uses the executor to iterate through the collection. While
+ * iterating, asserts that the _id of each successive document equals
+ * the respective integer in 'expectedIds'.
*/
- class DropIndexScan : public PlanExecutorBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- insert(BSON("_id" << 1 << "a" << 6));
- insert(BSON("_id" << 2 << "a" << 7));
- insert(BSON("_id" << 3 << "a" << 8));
- BSONObj indexSpec = BSON("a" << 1);
- addIndex(indexSpec);
+ void checkIds(int* expectedIds, PlanExecutor* exec) {
+ BSONObj objOut;
+ int idcount = 0;
+ while (PlanExecutor::ADVANCED == exec->getNext(&objOut, NULL)) {
+ ASSERT_EQUALS(expectedIds[idcount], objOut["_id"].numberInt());
+ ++idcount;
+ }
+ }
+};
- scoped_ptr<PlanExecutor> exec(makeIndexScanExec(ctx.ctx(), indexSpec, 7, 10));
- registerExec(exec.get());
+/**
+ * Create a scenario in which the same document is returned
+ * twice due to a concurrent document move and collection
+ * scan.
+ */
+class SnapshotControl : public SnapshotBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ setupCollection();
- BSONObj objOut;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
- ASSERT_EQUALS(7, objOut["a"].numberInt());
+ BSONObj filterObj = fromjson("{a: {$gte: 2}}");
- // After dropping the collection, the runner
- // should be dead.
- dropCollection();
- ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
+ Collection* coll = ctx.getCollection();
+ scoped_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
- deregisterExec(exec.get());
- }
- };
+ BSONObj objOut;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(2, objOut["a"].numberInt());
- /**
- * Test dropping the collection while an agg PlanExecutor is doing an index scan.
- */
- class DropIndexScanAgg : public PlanExecutorBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
+ forceDocumentMove();
- insert(BSON("_id" << 1 << "a" << 6));
- insert(BSON("_id" << 2 << "a" << 7));
- insert(BSON("_id" << 3 << "a" << 8));
- BSONObj indexSpec = BSON("a" << 1);
- addIndex(indexSpec);
-
- // Create the PlanExecutor which feeds the aggregation pipeline.
- boost::shared_ptr<PlanExecutor> innerExec(
- makeIndexScanExec(ctx.ctx(), indexSpec, 7, 10));
-
- // Create the aggregation pipeline.
- boost::intrusive_ptr<ExpressionContext> expCtx =
- new ExpressionContext(&_txn, NamespaceString(ns()));
-
- string errmsg;
- BSONObj inputBson = fromjson("{$match: {a: {$gte: 7, $lte: 10}}}");
- boost::intrusive_ptr<Pipeline> pipeline =
- Pipeline::parseCommand(errmsg, inputBson, expCtx);
- ASSERT_EQUALS(errmsg, "");
-
- // Create the output PlanExecutor that pulls results from the pipeline.
- std::auto_ptr<WorkingSet> ws(new WorkingSet());
- std::auto_ptr<PipelineProxyStage> proxy(
- new PipelineProxyStage(pipeline, innerExec, ws.get()));
- Collection* collection = ctx.getCollection();
+ int ids[] = {3, 4, 2};
+ checkIds(ids, exec.get());
+ }
+};
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws.release(), proxy.release(), collection,
- PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> outerExec(rawExec);
+/**
+ * A snapshot is really just a hint that means scan the _id index.
+ * Make sure that we do not see the document move with an _id
+ * index scan.
+ */
+class SnapshotTest : public SnapshotBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ setupCollection();
+ BSONObj indexSpec = BSON("_id" << 1);
+ addIndex(indexSpec);
- // Only the outer executor gets registered.
- registerExec(outerExec.get());
+ BSONObj filterObj = fromjson("{a: {$gte: 2}}");
+ scoped_ptr<PlanExecutor> exec(makeIndexScanExec(ctx.ctx(), indexSpec, 2, 5));
- // Verify that both the "inner" and "outer" plan executors have been killed after
- // dropping the collection.
- BSONObj objOut;
- dropCollection();
- ASSERT_EQUALS(PlanExecutor::DEAD, innerExec->getNext(&objOut, NULL));
- ASSERT_EQUALS(PlanExecutor::DEAD, outerExec->getNext(&objOut, NULL));
+ BSONObj objOut;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(2, objOut["a"].numberInt());
- deregisterExec(outerExec.get());
- }
- };
-
- class SnapshotBase : public PlanExecutorBase {
- protected:
- void setupCollection() {
- insert(BSON("_id" << 1 << "a" << 1));
- insert(BSON("_id" << 2 << "a" << 2 << "payload" << "x"));
- insert(BSON("_id" << 3 << "a" << 3));
- insert(BSON("_id" << 4 << "a" << 4));
- }
+ forceDocumentMove();
- /**
- * Increases a document's size dramatically such that the document
- * exceeds the available padding and must be moved to the end of
- * the collection.
- */
- void forceDocumentMove() {
- BSONObj query = BSON("_id" << 2);
- BSONObj updateSpec = BSON("$set" << BSON("payload" << payload8k()));
- update(query, updateSpec);
- }
+ // Since this time we're scanning the _id index,
+ // we should not see the moved document again.
+ int ids[] = {3, 4};
+ checkIds(ids, exec.get());
+ }
+};
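
Both snapshot tests come down to scan order: a collection scan visits records in storage order, and a document rewritten at the end of the collection after outgrowing its padding lands ahead of the scan position, while an _id-index scan visits each logical document exactly once. A standalone model of the storage-order case, assuming moves append to the end:

#include <cassert>
#include <vector>

int main() {
    // Storage order: ids 1, 2, 3, 4; the scan has already returned id 2.
    std::vector<int> storage = {1, 2, 3, 4};

    // Doc 2 outgrows its padding: deleted in place, rewritten at the end.
    storage.erase(storage.begin() + 1);  // {1, 3, 4}
    storage.push_back(2);                // {1, 3, 4, 2}

    // Resuming in storage order past id 1, the scan now yields 3, 4, 2:
    std::vector<int> seen(storage.begin() + 1, storage.end());
    assert((seen == std::vector<int>{3, 4, 2}));  // id 2 comes back a second time
    return 0;
}

Resuming the same scan in _id order would yield only 3 and 4, which is all the old $snapshot hint, i.e. hint({_id: 1}), ever guaranteed.
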
- std::string payload8k() {
- return std::string(8*1024, 'x');
- }
+namespace ClientCursor {
- /**
- * Given an array of ints, 'expectedIds', and a PlanExecutor,
- * 'exec', uses the executor to iterate through the collection. While
- * iterating, asserts that the _id of each successive document equals
- * the respective integer in 'expectedIds'.
- */
- void checkIds(int* expectedIds, PlanExecutor* exec) {
- BSONObj objOut;
- int idcount = 0;
- while (PlanExecutor::ADVANCED == exec->getNext(&objOut, NULL)) {
- ASSERT_EQUALS(expectedIds[idcount], objOut["_id"].numberInt());
- ++idcount;
- }
- }
- };
+using mongo::ClientCursor;
- /**
- * Create a scenario in which the same document is returned
- * twice due to a concurrent document move and collection
- * scan.
- */
- class SnapshotControl : public SnapshotBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- setupCollection();
+/**
+ * Test invalidation of ClientCursor.
+ */
+class Invalidate : public PlanExecutorBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ insert(BSON("a" << 1 << "b" << 1));
- BSONObj filterObj = fromjson("{a: {$gte: 2}}");
+ BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
- Collection* coll = ctx.getCollection();
- scoped_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
+ Collection* coll = ctx.getCollection();
+ PlanExecutor* exec = makeCollScanExec(coll, filterObj);
- BSONObj objOut;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
- ASSERT_EQUALS(2, objOut["a"].numberInt());
+ // Make a client cursor from the runner.
+ new ClientCursor(coll->getCursorManager(), exec, ns(), 0, BSONObj());
- forceDocumentMove();
+ // There should be one cursor before invalidation,
+ // and zero cursors after invalidation.
+ ASSERT_EQUALS(1U, numCursors());
+ coll->getCursorManager()->invalidateAll(false);
+ ASSERT_EQUALS(0U, numCursors());
+ }
+};
- int ids[] = {3, 4, 2};
- checkIds(ids, exec.get());
- }
- };
+/**
+ * Test that pinned client cursors persist even after
+ * invalidation.
+ */
+class InvalidatePinned : public PlanExecutorBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ insert(BSON("a" << 1 << "b" << 1));
+
+ Collection* collection = ctx.getCollection();
+
+ BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
+ PlanExecutor* exec = makeCollScanExec(collection, filterObj);
+
+ // Make a client cursor from the runner.
+ ClientCursor* cc =
+ new ClientCursor(collection->getCursorManager(), exec, ns(), 0, BSONObj());
+ ClientCursorPin ccPin(collection->getCursorManager(), cc->cursorid());
+
+ // If the cursor is pinned, it sticks around,
+ // even after invalidation.
+ ASSERT_EQUALS(1U, numCursors());
+ collection->getCursorManager()->invalidateAll(false);
+ ASSERT_EQUALS(1U, numCursors());
+
+ // The invalidation should have killed the runner.
+ BSONObj objOut;
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
+
+ // Deleting the underlying cursor should cause the
+ // number of cursors to return to 0.
+ ccPin.deleteUnderlying();
+ ASSERT_EQUALS(0U, numCursors());
+ }
+};
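
Pinning is what separates the two assertions above: invalidateAll() may kill every executor, but it must not free a ClientCursor that a client currently holds. A toy version of that rule (ToyManager is invented; the real manager also deals with locking and timeouts):

#include <cassert>
#include <map>

struct ToyCursor {
    bool pinned = false;
    bool killed = false;
};

struct ToyManager {
    std::map<long long, ToyCursor> cursors;
    long long nextId = 1;

    long long open() {
        cursors[nextId] = ToyCursor();
        return nextId++;
    }

    // Kill every cursor's executor, but delete only the unpinned ones.
    void invalidateAll() {
        for (auto it = cursors.begin(); it != cursors.end();) {
            it->second.killed = true;
            if (it->second.pinned)
                ++it;                    // pinned: the cursor object survives
            else
                it = cursors.erase(it);  // unpinned: deleted outright
        }
    }
};

int main() {
    ToyManager mgr;
    long long id = mgr.open();
    mgr.cursors[id].pinned = true;     // stands in for ClientCursorPin
    mgr.invalidateAll();
    assert(mgr.cursors.size() == 1u);  // the pinned cursor sticks around...
    assert(mgr.cursors[id].killed);    // ...but its executor is dead
    mgr.cursors.erase(id);             // stands in for deleteUnderlying()
    assert(mgr.cursors.empty());
    return 0;
}
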
- /**
- * A snapshot is really just a hint that means scan the _id index.
- * Make sure that we do not see the document move with an _id
- * index scan.
- */
- class SnapshotTest : public SnapshotBase {
- public:
- void run() {
+/**
+ * Test that client cursors time out and get
+ * deleted.
+ */
+class Timeout : public PlanExecutorBase {
+public:
+ void run() {
+ {
Client::WriteContext ctx(&_txn, ns());
- setupCollection();
- BSONObj indexSpec = BSON("_id" << 1);
- addIndex(indexSpec);
-
- BSONObj filterObj = fromjson("{a: {$gte: 2}}");
- scoped_ptr<PlanExecutor> exec(makeIndexScanExec(ctx.ctx(), indexSpec, 2, 5));
+ insert(BSON("a" << 1 << "b" << 1));
+ }
- BSONObj objOut;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
- ASSERT_EQUALS(2, objOut["a"].numberInt());
+ {
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
- forceDocumentMove();
+ BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
+ PlanExecutor* exec = makeCollScanExec(collection, filterObj);
- // Since this time we're scanning the _id index,
- // we should not see the moved document again.
- int ids[] = {3, 4};
- checkIds(ids, exec.get());
- }
- };
-
- namespace ClientCursor {
-
- using mongo::ClientCursor;
-
- /**
- * Test invalidation of ClientCursor.
- */
- class Invalidate : public PlanExecutorBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- insert(BSON("a" << 1 << "b" << 1));
-
- BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
-
- Collection* coll = ctx.getCollection();
- PlanExecutor* exec = makeCollScanExec(coll,filterObj);
-
- // Make a client cursor from the runner.
- new ClientCursor(coll->getCursorManager(), exec, ns(), 0, BSONObj());
-
- // There should be one cursor before invalidation,
- // and zero cursors after invalidation.
- ASSERT_EQUALS(1U, numCursors());
- coll->getCursorManager()->invalidateAll(false);
- ASSERT_EQUALS(0U, numCursors());
- }
- };
-
- /**
- * Test that pinned client cursors persist even after
- * invalidation.
- */
- class InvalidatePinned : public PlanExecutorBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- insert(BSON("a" << 1 << "b" << 1));
-
- Collection* collection = ctx.getCollection();
-
- BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
- PlanExecutor* exec = makeCollScanExec(collection, filterObj);
-
- // Make a client cursor from the runner.
- ClientCursor* cc = new ClientCursor(collection->getCursorManager(),
- exec,
- ns(),
- 0,
- BSONObj());
- ClientCursorPin ccPin(collection->getCursorManager(), cc->cursorid());
-
- // If the cursor is pinned, it sticks around,
- // even after invalidation.
- ASSERT_EQUALS(1U, numCursors());
- collection->getCursorManager()->invalidateAll(false);
- ASSERT_EQUALS(1U, numCursors());
-
- // The invalidation should have killed the runner.
- BSONObj objOut;
- ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
-
- // Deleting the underlying cursor should cause the
- // number of cursors to return to 0.
- ccPin.deleteUnderlying();
- ASSERT_EQUALS(0U, numCursors());
- }
- };
-
- /**
- * Test that client cursors time out and get
- * deleted.
- */
- class Timeout : public PlanExecutorBase {
- public:
- void run() {
- {
- Client::WriteContext ctx(&_txn, ns());
- insert(BSON("a" << 1 << "b" << 1));
- }
-
- {
- AutoGetCollectionForRead ctx(&_txn, ns());
- Collection* collection = ctx.getCollection();
-
- BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
- PlanExecutor* exec = makeCollScanExec(collection, filterObj);
-
- // Make a client cursor from the runner.
- new ClientCursor(collection->getCursorManager(), exec, ns(), 0, BSONObj());
- }
-
- // There should be one cursor before timeout,
- // and zero cursors after timeout.
- ASSERT_EQUALS(1U, numCursors());
- CursorManager::timeoutCursorsGlobal(&_txn, 600001);
- ASSERT_EQUALS(0U, numCursors());
- }
- };
-
- } // namespace ClientCursor
-
- class All : public Suite {
- public:
- All() : Suite( "query_plan_executor" ) { }
-
- void setupTests() {
- add<DropCollScan>();
- add<DropIndexScan>();
- add<DropIndexScanAgg>();
- add<SnapshotControl>();
- add<SnapshotTest>();
- add<ClientCursor::Invalidate>();
- add<ClientCursor::InvalidatePinned>();
- add<ClientCursor::Timeout>();
+ // Make a client cursor from the runner.
+ new ClientCursor(collection->getCursorManager(), exec, ns(), 0, BSONObj());
}
- };
- SuiteInstance<All> queryPlanExecutorAll;
+ // There should be one cursor before timeout,
+ // and zero cursors after timeout.
+ ASSERT_EQUALS(1U, numCursors());
+ CursorManager::timeoutCursorsGlobal(&_txn, 600001);
+ ASSERT_EQUALS(0U, numCursors());
+ }
+};
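
The magic number 600001 is just past the server's ten-minute idle limit for cursors, so a single pass is enough to reap the freshly created cursor. A sketch of the reaping rule under that assumption:

#include <cassert>
#include <vector>

static const int kIdleLimitMillis = 600 * 1000;  // assumed ten-minute idle limit

// Age every open cursor by 'elapsed' ms and reap the ones past the limit;
// returns how many cursors survive.
size_t timeoutCursors(std::vector<int>& idleMillis, int elapsed) {
    std::vector<int> alive;
    for (size_t i = 0; i < idleMillis.size(); ++i)
        if (idleMillis[i] + elapsed <= kIdleLimitMillis)
            alive.push_back(idleMillis[i] + elapsed);
    idleMillis.swap(alive);
    return idleMillis.size();
}

int main() {
    std::vector<int> cursors(1, 0);                 // one fresh cursor
    assert(timeoutCursors(cursors, 600001) == 0u);  // 600001 ms > 10 min: reaped
    return 0;
}
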
+
+} // namespace ClientCursor
+
+class All : public Suite {
+public:
+ All() : Suite("query_plan_executor") {}
+
+ void setupTests() {
+ add<DropCollScan>();
+ add<DropIndexScan>();
+ add<DropIndexScanAgg>();
+ add<SnapshotControl>();
+ add<SnapshotTest>();
+ add<ClientCursor::Invalidate>();
+ add<ClientCursor::InvalidatePinned>();
+ add<ClientCursor::Timeout>();
+ }
+};
+
+SuiteInstance<All> queryPlanExecutorAll;
} // namespace QueryPlanExecutor
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 44eca7ed214..ab1ba6196a6 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -51,1375 +51,1392 @@
namespace QueryStageAnd {
- using boost::scoped_ptr;
- using boost::shared_ptr;
- using std::auto_ptr;
- using std::set;
-
- class QueryStageAndBase {
- public:
- QueryStageAndBase() : _client(&_txn) {
-
+using boost::scoped_ptr;
+using boost::shared_ptr;
+using std::auto_ptr;
+using std::set;
+
+class QueryStageAndBase {
+public:
+ QueryStageAndBase() : _client(&_txn) {}
+
+ virtual ~QueryStageAndBase() {
+ _client.dropCollection(ns());
+ }
+
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
+
+ IndexDescriptor* getIndex(const BSONObj& obj, Collection* coll) {
+ IndexDescriptor* descriptor = coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, obj);
+ if (NULL == descriptor) {
+ FAIL(mongoutils::str::stream() << "Unable to find index with key pattern " << obj);
}
-
- virtual ~QueryStageAndBase() {
- _client.dropCollection(ns());
+ return descriptor;
+ }
+
+ void getLocs(set<RecordId>* out, Collection* coll) {
+ RecordIterator* it = coll->getIterator(&_txn, RecordId(), CollectionScanParams::FORWARD);
+ while (!it->isEOF()) {
+ RecordId nextLoc = it->getNext();
+ out->insert(nextLoc);
}
+ delete it;
+ }
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
- }
+ void insert(const BSONObj& obj) {
+ _client.insert(ns(), obj);
+ }
- IndexDescriptor* getIndex(const BSONObj& obj, Collection* coll) {
- IndexDescriptor* descriptor = coll->getIndexCatalog()->findIndexByKeyPattern( &_txn, obj );
- if (NULL == descriptor) {
- FAIL(mongoutils::str::stream() << "Unable to find index with key pattern " << obj);
- }
- return descriptor;
- }
+ void remove(const BSONObj& obj) {
+ _client.remove(ns(), obj);
+ }
- void getLocs(set<RecordId>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(&_txn, RecordId(),
- CollectionScanParams::FORWARD);
- while (!it->isEOF()) {
- RecordId nextLoc = it->getNext();
- out->insert(nextLoc);
+ /**
+ * Executes plan stage until EOF.
+ * Returns number of results seen if execution reaches EOF successfully.
+ * Otherwise, returns -1 on stage failure.
+ */
+ int countResults(PlanStage* stage) {
+ int count = 0;
+ while (!stage->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = stage->work(&id);
+ if (PlanStage::FAILURE == status) {
+ return -1;
+ }
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
- delete it;
+ ++count;
}
+ return count;
+ }
- void insert(const BSONObj& obj) {
- _client.insert(ns(), obj);
- }
+ /**
+ * Gets the next result from 'stage'.
+ *
+ * Fails if the stage fails or returns DEAD, if the returned working
+ * set member is not fetched, or if there are no more results.
+ */
+ BSONObj getNext(PlanStage* stage, WorkingSet* ws) {
+ while (!stage->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = stage->work(&id);
- void remove(const BSONObj& obj) {
- _client.remove(ns(), obj);
- }
+ // We shouldn't fail or be dead.
+ ASSERT(PlanStage::FAILURE != status);
+ ASSERT(PlanStage::DEAD != status);
- /**
- * Executes plan stage until EOF.
- * Returns number of results seen if execution reaches EOF successfully.
- * Otherwise, returns -1 on stage failure.
- */
- int countResults(PlanStage* stage) {
- int count = 0;
- while (!stage->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = stage->work(&id);
- if (PlanStage::FAILURE == status) {
- return -1;
- }
- if (PlanStage::ADVANCED != status) { continue; }
- ++count;
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
- return count;
+
+ WorkingSetMember* member = ws->get(id);
+ ASSERT(member->hasObj());
+ return member->obj.value();
}
- /**
- * Gets the next result from 'stage'.
- *
- * Fails if the stage fails or returns DEAD, if the returned working
- * set member is not fetched, or if there are no more results.
- */
- BSONObj getNext(PlanStage* stage, WorkingSet* ws) {
- while (!stage->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = stage->work(&id);
-
- // We shouldn't fail or be dead.
- ASSERT(PlanStage::FAILURE != status);
- ASSERT(PlanStage::DEAD != status);
-
- if (PlanStage::ADVANCED != status) { continue; }
-
- WorkingSetMember* member = ws->get(id);
- ASSERT(member->hasObj());
- return member->obj.value();
- }
+ // We failed to produce a result.
+ ASSERT(false);
+ return BSONObj();
+ }
- // We failed to produce a result.
- ASSERT(false);
- return BSONObj();
- }
+ static const char* ns() {
+ return "unittests.QueryStageAnd";
+ }
- static const char* ns() { return "unittests.QueryStageAnd"; }
+protected:
+ OperationContextImpl _txn;
- protected:
- OperationContextImpl _txn;
+private:
+ DBDirectClient _client;
+};
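
The hashed AND exercised below is an index intersection: the stage drains every child but the last into an in-memory table keyed by RecordId, then streams the last child and emits only the RecordIds already in the table. A standalone sketch of that shape, with std::set standing in for the stage's hash table:

#include <cassert>
#include <set>
#include <vector>

// Intersect two index scans the way a hashed AND does: buffer the first
// child's RecordIds, then probe the table while streaming the last child.
std::vector<int> hashAnd(const std::vector<int>& firstChild,
                         const std::vector<int>& lastChild) {
    std::set<int> buffered(firstChild.begin(), firstChild.end());
    std::vector<int> out;
    for (int rid : lastChild)
        if (buffered.count(rid))
            out.push_back(rid);
    return out;
}

int main() {
    // Documents have foo == bar == id for ids 0..49, as in the tests below.
    std::vector<int> fooLe20, barGe10;
    for (int i = 20; i >= 0; --i) fooLe20.push_back(i);  // foo <= 20, descending
    for (int i = 10; i < 50; ++i) barGe10.push_back(i);  // bar >= 10, ascending

    // The intersection is 10..20: the 11 results the two-leaf test expects.
    assert(hashAnd(fooLe20, barGe10).size() == 11u);
    return 0;
}
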
- private:
- DBDirectClient _client;
- };
+//
+// Hash AND tests
+//
- //
- // Hash AND tests
- //
+/**
+ * Invalidate a RecordId held by a hashed AND before the AND finishes evaluating. The AND should
+ * process all other data just fine and flag the invalidated RecordId in the WorkingSet.
+ */
+class QueryStageAndHashInvalidation : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- /**
- * Invalidate a RecordId held by a hashed AND before the AND finishes evaluating. The AND should
- * process all other data just fine and flag the invalidated RecordId in the WorkingSet.
- */
- class QueryStageAndHashInvalidation : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << i));
+ }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << i));
- }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar >= 10
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 10);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // ah reads the first child into its hash table.
+ // ah should read foo=20, foo=19, ..., foo=0 in that order.
+ // Read half of them...
+ for (int i = 0; i < 10; ++i) {
+ WorkingSetID out;
+ PlanStage::StageState status = ah->work(&out);
+ ASSERT_EQUALS(PlanStage::NEED_TIME, status);
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar >= 10
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 10);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // ah reads the first child into its hash table.
- // ah should read foo=20, foo=19, ..., foo=0 in that order.
- // Read half of them...
- for (int i = 0; i < 10; ++i) {
- WorkingSetID out;
- PlanStage::StageState status = ah->work(&out);
- ASSERT_EQUALS(PlanStage::NEED_TIME, status);
+ // ...yield
+ ah->saveState();
+ // ...invalidate one of the read objects
+ set<RecordId> data;
+ getLocs(&data, coll);
+ size_t memUsageBefore = ah->getMemUsage();
+ for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
+ if (coll->docFor(&_txn, *it).value()["foo"].numberInt() == 15) {
+ ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
+ remove(coll->docFor(&_txn, *it).value());
+ break;
}
-
- // ...yield
- ah->saveState();
- // ...invalidate one of the read objects
- set<RecordId> data;
- getLocs(&data, coll);
- size_t memUsageBefore = ah->getMemUsage();
- for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
- if (coll->docFor(&_txn, *it).value()["foo"].numberInt() == 15) {
- ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *it).value());
- break;
- }
+ }
+ size_t memUsageAfter = ah->getMemUsage();
+ ah->restoreState(&_txn);
+
+ // Invalidating a read object should decrease memory usage.
+ ASSERT_LESS_THAN(memUsageAfter, memUsageBefore);
+
+ // And expect to find foo==15 flagged for review.
+ const unordered_set<WorkingSetID>& flagged = ws.getFlagged();
+ ASSERT_EQUALS(size_t(1), flagged.size());
+
+ // Expect to find the right value of foo in the flagged item.
+ WorkingSetMember* member = ws.get(*flagged.begin());
+ ASSERT_TRUE(NULL != member);
+ ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state);
+ BSONElement elt;
+ ASSERT_TRUE(member->getFieldDotted("foo", &elt));
+ ASSERT_EQUALS(15, elt.numberInt());
+
+ // Now, finish up the AND. Since foo == bar, we would have 11 results, but we subtract
+ // one because of a mid-plan invalidation, so 10.
+ int count = 0;
+ while (!ah->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ah->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
- size_t memUsageAfter = ah->getMemUsage();
- ah->restoreState(&_txn);
-
- // Invalidating a read object should decrease memory usage.
- ASSERT_LESS_THAN(memUsageAfter, memUsageBefore);
- // And expect to find foo==15 it flagged for review.
- const unordered_set<WorkingSetID>& flagged = ws.getFlagged();
- ASSERT_EQUALS(size_t(1), flagged.size());
+ ++count;
+ member = ws.get(id);
- // Expect to find the right value of foo in the flagged item.
- WorkingSetMember* member = ws.get(*flagged.begin());
- ASSERT_TRUE(NULL != member);
- ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state);
- BSONElement elt;
ASSERT_TRUE(member->getFieldDotted("foo", &elt));
- ASSERT_EQUALS(15, elt.numberInt());
-
- // Now, finish up the AND. Since foo == bar, we would have 11 results, but we subtract
- // one because of a mid-plan invalidation, so 10.
- int count = 0;
- while (!ah->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ah->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
-
- ++count;
- member = ws.get(id);
-
- ASSERT_TRUE(member->getFieldDotted("foo", &elt));
- ASSERT_LESS_THAN_OR_EQUALS(elt.numberInt(), 20);
- ASSERT_NOT_EQUALS(15, elt.numberInt());
- ASSERT_TRUE(member->getFieldDotted("bar", &elt));
- ASSERT_GREATER_THAN_OR_EQUALS(elt.numberInt(), 10);
- }
+ ASSERT_LESS_THAN_OR_EQUALS(elt.numberInt(), 20);
+ ASSERT_NOT_EQUALS(15, elt.numberInt());
+ ASSERT_TRUE(member->getFieldDotted("bar", &elt));
+ ASSERT_GREATER_THAN_OR_EQUALS(elt.numberInt(), 10);
+ }
- ASSERT_EQUALS(10, count);
+ ASSERT_EQUALS(10, count);
+ }
+};
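
The invalidation bookkeeping the test checks is: once a buffered RecordId is deleted mid-plan, the stage can no longer return it by location, so it copies the document into an owned object, flags the working-set member for review, and its buffered footprint shrinks. A minimal model of that bookkeeping, with invented names:

#include <cassert>
#include <map>
#include <string>
#include <vector>

struct ToyStage {
    std::map<int, std::string> buffered;  // RecordId -> doc: the hash table
    std::vector<std::string> flagged;     // owned copies awaiting review

    size_t memUsage() const {
        size_t n = 0;
        for (const auto& kv : buffered)
            n += kv.second.size();
        return n;
    }

    // Deletion notification: copy the doc out as an owned object, then
    // drop it from the buffer so it cannot be returned by location.
    void invalidate(int recordId) {
        auto it = buffered.find(recordId);
        if (it != buffered.end()) {
            flagged.push_back(it->second);
            buffered.erase(it);
        }
    }
};

int main() {
    ToyStage stage;
    stage.buffered[15] = "{foo: 15}";
    stage.buffered[16] = "{foo: 16}";
    size_t before = stage.memUsage();
    stage.invalidate(15);                // the doc with foo == 15 is deleted
    assert(stage.memUsage() < before);   // the buffered footprint shrinks...
    assert(stage.flagged.size() == 1u);  // ...and the doc is flagged instead
    return 0;
}
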
+
+// Invalidate one of the "are we EOF?" lookahead results.
+class QueryStageAndHashInvalidateLookahead : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // Invalidate one of the "are we EOF?" lookahead results.
- class QueryStageAndHashInvalidateLookahead : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("_id" << i << "foo" << i << "bar" << i << "baz" << i));
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("_id" << i << "foo" << i << "bar" << i << "baz" << i));
+ }
+
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+ addIndex(BSON("baz" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
+
+ // Foo <= 20 (descending)
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar <= 19 (descending)
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 19);
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // First call to work reads the first result from the children.
+ // The first result from the first scan (over foo) is {foo: 20, bar: 20, baz: 20}.
+ // The first result from the second scan (over bar) is {foo: 19, bar: 19, baz: 19}.
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ah->work(&id);
+ ASSERT_EQUALS(PlanStage::NEED_TIME, status);
+
+ const unordered_set<WorkingSetID>& flagged = ws.getFlagged();
+ ASSERT_EQUALS(size_t(0), flagged.size());
+
+ // "delete" deletedObj (by invalidating the RecordId of the obj that matches it).
+ BSONObj deletedObj = BSON("_id" << 20 << "foo" << 20 << "bar" << 20 << "baz" << 20);
+ ah->saveState();
+ set<RecordId> data;
+ getLocs(&data, coll);
+
+ size_t memUsageBefore = ah->getMemUsage();
+ for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
+ if (0 == deletedObj.woCompare(coll->docFor(&_txn, *it).value())) {
+ ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
+ break;
}
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
- addIndex(BSON("baz" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
-
- // Foo <= 20 (descending)
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar <= 19 (descending)
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 19);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // First call to work reads the first result from the children.
- // The first result is for the first scan over foo is {foo: 20, bar: 20, baz: 20}.
- // The first result is for the second scan over bar is {foo: 19, bar: 19, baz: 19}.
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ah->work(&id);
- ASSERT_EQUALS(PlanStage::NEED_TIME, status);
+ size_t memUsageAfter = ah->getMemUsage();
+ // Look ahead results do not count towards memory usage.
+ ASSERT_EQUALS(memUsageBefore, memUsageAfter);
- const unordered_set<WorkingSetID>& flagged = ws.getFlagged();
- ASSERT_EQUALS(size_t(0), flagged.size());
-
- // "delete" deletedObj (by invalidating the RecordId of the obj that matches it).
- BSONObj deletedObj = BSON("_id" << 20 << "foo" << 20 << "bar" << 20 << "baz" << 20);
- ah->saveState();
- set<RecordId> data;
- getLocs(&data, coll);
-
- size_t memUsageBefore = ah->getMemUsage();
- for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
- if (0 == deletedObj.woCompare(coll->docFor(&_txn, *it).value())) {
- ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
- break;
- }
- }
+ ah->restoreState(&_txn);
+
+ // The deleted obj should show up in flagged.
+ ASSERT_EQUALS(size_t(1), flagged.size());
- size_t memUsageAfter = ah->getMemUsage();
- // Look ahead results do not count towards memory usage.
- ASSERT_EQUALS(memUsageBefore, memUsageAfter);
-
- ah->restoreState(&_txn);
-
- // The deleted obj should show up in flagged.
- ASSERT_EQUALS(size_t(1), flagged.size());
-
- // And not in our results.
- int count = 0;
- while (!ah->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ah->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
- WorkingSetMember* wsm = ws.get(id);
- ASSERT_NOT_EQUALS(0,
- deletedObj.woCompare(coll->docFor(&_txn, wsm->loc).value()));
- ++count;
+ // And not in our results.
+ int count = 0;
+ while (!ah->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ah->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
+ WorkingSetMember* wsm = ws.get(id);
+ ASSERT_NOT_EQUALS(0, deletedObj.woCompare(coll->docFor(&_txn, wsm->loc).value()));
+ ++count;
+ }
- ASSERT_EQUALS(count, 20);
+ ASSERT_EQUALS(count, 20);
+ }
+};
+
+// An AND with two children.
+class QueryStageAndHashTwoLeaf : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // An AND with two children.
- class QueryStageAndHashTwoLeaf : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << i));
- }
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << i));
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar >= 10
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 10);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // foo == bar == baz, and foo<=20, bar>=10, so our values are:
- // foo == 10, 11, 12, 13, 14, 15. 16, 17, 18, 19, 20
- ASSERT_EQUALS(11, countResults(ah.get()));
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar >= 10
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 10);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // foo == bar, and foo<=20, bar>=10, so our values are:
+ // foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20.
+ ASSERT_EQUALS(11, countResults(ah.get()));
+ }
+};
+
+// An AND with two children.
+// Add large keys (512 bytes) to index of first child to cause the
+// internal buffer within the hashed AND to exceed its threshold
+// (lowered from the default 32MB for the test) before gathering
+// all requested results.
+class QueryStageAndHashTwoLeafFirstChildLargeKeys : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // An AND with two children.
- // Add large keys (512 bytes) to index of first child to cause
- // internal buffer within hashed AND to exceed threshold (32MB)
- // before gathering all requested results.
- class QueryStageAndHashTwoLeafFirstChildLargeKeys : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- // Generate large keys for {foo: 1, big: 1} index.
- std::string big(512, 'a');
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << i << "big" << big));
- }
+ // Generate large keys for {foo: 1, big: 1} index.
+ std::string big(512, 'a');
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << i << "big" << big));
+ }
- addIndex(BSON("foo" << 1 << "big" << 1));
- addIndex(BSON("bar" << 1));
-
- // Lower buffer limit to 20 * sizeof(big) to force memory error
- // before hashed AND is done reading the first child (stage has to
- // hold 21 keys in buffer for Foo <= 20).
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll, 20 * big.size()));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1 << "big" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20 << "" << big);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar >= 10
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 10);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Stage execution should fail.
- ASSERT_EQUALS(-1, countResults(ah.get()));
+ addIndex(BSON("foo" << 1 << "big" << 1));
+ addIndex(BSON("bar" << 1));
+
+ // Lower buffer limit to 20 * sizeof(big) to force memory error
+ // before hashed AND is done reading the first child (stage has to
+ // hold 21 keys in buffer for Foo <= 20).
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll, 20 * big.size()));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1 << "big" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20 << "" << big);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar >= 10
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 10);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Stage execution should fail.
+ ASSERT_EQUALS(-1, countResults(ah.get()));
+ }
+};
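
The failure here is forced by arithmetic: the first child must buffer all 21 keys satisfying foo <= 20, each carrying the 512-byte 'big' component, while the stage's limit is sized to hold only 20 of them. A quick check of that bound (ignoring per-entry overhead, which only makes it worse):

#include <cassert>
#include <string>

int main() {
    std::string big(512, 'a');
    size_t limit = 20 * big.size();   // the AndHashStage constructor argument
    size_t needed = 21 * big.size();  // keys for foo == 0..20, each with 'big'
    assert(needed > limit);           // so countResults() reports -1
    return 0;
}
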
+
+// An AND with two children.
+// Add large keys (512 bytes) to index of last child to verify that
+// keys in last child are not buffered
+class QueryStageAndHashTwoLeafLastChildLargeKeys : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // An AND with three children.
- // Add large keys (512 bytes) to index of last child to verify that
- // keys in last child are not buffered
- class QueryStageAndHashTwoLeafLastChildLargeKeys : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- // Generate large keys for {baz: 1, big: 1} index.
- std::string big(512, 'a');
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << i << "big" << big));
- }
+ // Generate large keys for {baz: 1, big: 1} index.
+ std::string big(512, 'a');
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << i << "big" << big));
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1 << "big" << 1));
-
- // Lower buffer limit to 5 * sizeof(big) to ensure that
- // keys in last child's index are not buffered. There are 6 keys
- // that satisfy the criteria Foo <= 20 and Bar >= 10 and 5 <= baz <= 15.
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll, 5 * big.size()));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar >= 10
- params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
- params.bounds.startKey = BSON("" << 10 << "" << big);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // foo == bar == baz, and foo<=20, bar>=10, so our values are:
- // foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20.
- ASSERT_EQUALS(11, countResults(ah.get()));
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1 << "big" << 1));
+
+ // Lower buffer limit to 5 * sizeof(big) to ensure that keys in the
+ // last child's index are not buffered. Eleven keys satisfy
+ // Foo <= 20 and Bar >= 10; buffering their large entries would
+ // exceed this limit, so the test passes only if they are streamed.
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll, 5 * big.size()));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar >= 10
+ params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
+ params.bounds.startKey = BSON("" << 10 << "" << big);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // foo == bar, and foo<=20, bar>=10, so our values are:
+ // foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20.
+ ASSERT_EQUALS(11, countResults(ah.get()));
+ }
+};
+
+// An AND with three children.
+class QueryStageAndHashThreeLeaf : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // An AND with three children.
- class QueryStageAndHashThreeLeaf : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << i << "baz" << i));
- }
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << i << "baz" << i));
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
- addIndex(BSON("baz" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar >= 10
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 10);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // 5 <= baz <= 15
- params.descriptor = getIndex(BSON("baz" << 1), coll);
- params.bounds.startKey = BSON("" << 5);
- params.bounds.endKey = BSON("" << 15);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
- // foo == 10, 11, 12, 13, 14, 15.
- ASSERT_EQUALS(6, countResults(ah.get()));
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+ addIndex(BSON("baz" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar >= 10
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 10);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // 5 <= baz <= 15
+ params.descriptor = getIndex(BSON("baz" << 1), coll);
+ params.bounds.startKey = BSON("" << 5);
+ params.bounds.endKey = BSON("" << 15);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
+ // foo == 10, 11, 12, 13, 14, 15.
+ ASSERT_EQUALS(6, countResults(ah.get()));
+ }
+};
+
+// An AND with three children.
+// Add large keys (512 bytes) to index of second child to cause the
+// internal buffer within the hashed AND to exceed its threshold
+// (lowered from the default 32MB for the test) before gathering
+// all requested results.
+// We need 3 children because the hashed AND stage buffers data for
+// N-1 of its children. If the second child were the last child, it would
+// not be buffered.
+class QueryStageAndHashThreeLeafMiddleChildLargeKeys : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // An AND with three children.
- // Add large keys (512 bytes) to index of second child to cause
- // internal buffer within hashed AND to exceed threshold (32MB)
- // before gathering all requested results.
- // We need 3 children because the hashed AND stage buffered data for
- // N-1 of its children. If the second child is the last child, it will not
- // be buffered.
- class QueryStageAndHashThreeLeafMiddleChildLargeKeys : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- // Generate large keys for {bar: 1, big: 1} index.
- std::string big(512, 'a');
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << i << "baz" << i << "big" << big));
- }
+ // Generate large keys for {bar: 1, big: 1} index.
+ std::string big(512, 'a');
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << i << "baz" << i << "big" << big));
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1 << "big" << 1));
- addIndex(BSON("baz" << 1));
-
- // Lower buffer limit to 10 * sizeof(big) to force memory error
- // before hashed AND is done reading the second child (stage has to
- // hold 11 keys in buffer for Foo <= 20 and Bar >= 10).
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll, 10 * big.size()));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar >= 10
- params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
- params.bounds.startKey = BSON("" << 10 << "" << big);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // 5 <= baz <= 15
- params.descriptor = getIndex(BSON("baz" << 1), coll);
- params.bounds.startKey = BSON("" << 5);
- params.bounds.endKey = BSON("" << 15);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Stage execution should fail.
- ASSERT_EQUALS(-1, countResults(ah.get()));
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1 << "big" << 1));
+ addIndex(BSON("baz" << 1));
+
+ // Lower the buffer limit to 10 * big.size() to force a memory error
+ // before the hashed AND is done reading the second child (the stage
+ // has to hold 11 keys in its buffer for Foo <= 20 and Bar >= 10).
+ WorkingSet ws;
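+ // The fourth AndHashStage argument is the maximum internal buffer size, in bytes.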
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll, 10 * big.size()));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar >= 10
+ params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
+ params.bounds.startKey = BSON("" << 10 << "" << big);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // 5 <= baz <= 15
+ params.descriptor = getIndex(BSON("baz" << 1), coll);
+ params.bounds.startKey = BSON("" << 5);
+ params.bounds.endKey = BSON("" << 15);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Stage execution should fail.
+ ASSERT_EQUALS(-1, countResults(ah.get()));
+ }
+};
+
+// An AND with an index scan that returns nothing.
+class QueryStageAndHashWithNothing : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // An AND with an index scan that returns nothing.
- class QueryStageAndHashWithNothing : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << 20));
- }
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << 20));
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar == 5. Index scan should be eof.
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 5);
- params.bounds.endKey = BSON("" << 5);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- int count = 0;
- int works = 0;
- while (!ah->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- ++works;
- PlanStage::StageState status = ah->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
- ++count;
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar == 5. The index scan should be EOF.
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 5);
+ params.bounds.endKey = BSON("" << 5);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ int count = 0;
+ int works = 0;
+ while (!ah->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ ++works;
+ PlanStage::StageState status = ah->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
+ ++count;
+ }
- ASSERT_EQUALS(0, count);
+ ASSERT_EQUALS(0, count);
+
+ // We check the "look ahead for EOF" here by examining the number of works required to
+ // hit EOF. Our first call to work will pick up that bar==5 is EOF and the AND will EOF
+ // immediately.
+ ASSERT_EQUALS(works, 1);
+ }
+};
+
+// An AND that scans data but returns nothing.
+class QueryStageAndHashProducesNothing : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- // We check the "look ahead for EOF" here by examining the number of works required to
- // hit EOF. Our first call to work will pick up that bar==5 is EOF and the AND will EOF
- // immediately.
- ASSERT_EQUALS(works, 1);
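+ // Each doc has either 'foo' or 'bar' but never both, so the intersection below is empty.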
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("foo" << (100 + i)));
+ insert(BSON("bar" << i));
}
- };
-
- // An AND that scans data but returns nothing.
- class QueryStageAndHashProducesNothing : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 10; ++i) {
- insert(BSON("foo" << (100 + i)));
- insert(BSON("bar" << i));
- }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
+
+ // Foo >= 100
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 100);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar <= 100
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 100);
+ // This is subtle: the docs that only contain 'foo' have no 'bar' value, so the
+ // 'bar' index stores them under the null ("nothing found") key. We don't want
+ // to include that key in our scan.
+ params.bounds.endKey = BSON(""
+ << "");
+ params.bounds.endKeyInclusive = false;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ ASSERT_EQUALS(0, countResults(ah.get()));
+ }
+};
+
+// An AND that would return more data but the matcher filters it.
+class QueryStageAndHashWithMatcher : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
-
- // Foo >= 100
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 100);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar <= 100
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 100);
- // This is subtle and confusing. We couldn't extract any keys from the elements with
- // 'foo' in them so we would normally index them with the "nothing found" key. We don't
- // want to include that in our scan.
- params.bounds.endKey = BSON("" << "");
- params.bounds.endKeyInclusive = false;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- ASSERT_EQUALS(0, countResults(ah.get()));
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << (100 - i)));
}
- };
-
- // An AND that would return more data but the matcher filters it.
- class QueryStageAndHashWithMatcher : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << (100 - i)));
- }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ BSONObj filter = BSON("bar" << 97);
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(filter);
+ verify(swme.isOK());
+ auto_ptr<MatchExpression> filterExpr(swme.getValue());
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, filterExpr.get(), coll));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar >= 10
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 10);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Only the single doc with bar == 97 (foo == 3) passes the filter.
+ ASSERT_EQUALS(1, countResults(ah.get()));
+ }
+};
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- BSONObj filter = BSON("bar" << 97);
- StatusWithMatchExpression swme = MatchExpressionParser::parse(filter);
- verify(swme.isOK());
- auto_ptr<MatchExpression> filterExpr(swme.getValue());
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, filterExpr.get(), coll));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar >= 95
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 10);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar == 97
- ASSERT_EQUALS(1, countResults(ah.get()));
+/**
+ * SERVER-14607: Check that hash-based intersection works when the first
+ * child returns fetched docs but the second child returns index keys.
+ */
+class QueryStageAndHashFirstChildFetched : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- /**
- * SERVER-14607: Check that hash-based intersection works when the first
- * child returns fetched docs but the second child returns index keys.
- */
- class QueryStageAndHashFirstChildFetched : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << i));
- }
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << i));
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL);
-
- // First child of the AND_HASH stage is a Fetch. The NULL in the
- // constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll);
- ah->addChild(fetch);
-
- // Bar >= 10
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 10);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Check that the AndHash stage returns docs {foo: 10, bar: 10}
- // through {foo: 20, bar: 20}.
- for (int i = 10; i <= 20; i++) {
- BSONObj obj = getNext(ah.get(), &ws);
- ASSERT_EQUALS(i, obj["foo"].numberInt());
- ASSERT_EQUALS(i, obj["bar"].numberInt());
- }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL);
+
+ // First child of the AND_HASH stage is a Fetch. The NULL in the
+ // constructor means there is no filter.
+ FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll);
+ ah->addChild(fetch);
+
+ // Bar >= 10
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 10);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Check that the AndHash stage returns docs {foo: 10, bar: 10}
+ // through {foo: 20, bar: 20}.
+ for (int i = 10; i <= 20; i++) {
+ BSONObj obj = getNext(ah.get(), &ws);
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
+ ASSERT_EQUALS(i, obj["bar"].numberInt());
}
- };
+ }
+};
- /**
- * SERVER-14607: Check that hash-based intersection works when the first
- * child returns index keys but the second returns fetched docs.
- */
- class QueryStageAndHashSecondChildFetched : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+/**
+ * SERVER-14607: Check that hash-based intersection works when the first
+ * child returns index keys but the second returns fetched docs.
+ */
+class QueryStageAndHashSecondChildFetched : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << i << "bar" << i));
- }
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << i << "bar" << i));
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
-
- // Foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar >= 10
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 10);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL);
-
- // Second child of the AND_HASH stage is a Fetch. The NULL in the
- // constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll);
- ah->addChild(fetch);
-
- // Check that the AndHash stage returns docs {foo: 10, bar: 10}
- // through {foo: 20, bar: 20}.
- for (int i = 10; i <= 20; i++) {
- BSONObj obj = getNext(ah.get(), &ws);
- ASSERT_EQUALS(i, obj["foo"].numberInt());
- ASSERT_EQUALS(i, obj["bar"].numberInt());
- }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
+
+ // Foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar >= 10
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 10);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL);
+
+ // Second child of the AND_HASH stage is a Fetch. The NULL in the
+ // constructor means there is no filter.
+ FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll);
+ ah->addChild(fetch);
+
+ // Check that the AndHash stage returns docs {foo: 10, bar: 10}
+ // through {foo: 20, bar: 20}.
+ for (int i = 10; i <= 20; i++) {
+ BSONObj obj = getNext(ah.get(), &ws);
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
+ ASSERT_EQUALS(i, obj["bar"].numberInt());
}
- };
+ }
+};
- //
- // Sorted AND tests
- //
+//
+// Sorted AND tests
+//
- /**
- * Invalidate a RecordId held by a sorted AND before the AND finishes evaluating. The AND should
- * process all other data just fine and flag the invalidated RecordId in the WorkingSet.
- */
- class QueryStageAndSortedInvalidation : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+/**
+ * Invalidate a RecordId held by a sorted AND before the AND finishes evaluating. The AND should
+ * process all other data just fine and flag the invalidated RecordId in the WorkingSet.
+ */
+class QueryStageAndSortedInvalidation : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- // Insert a bunch of data
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << 1 << "bar" << 1));
- }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
-
- // Scan over foo == 1
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 1);
- params.bounds.endKey = BSON("" << 1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Scan over bar == 1
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Get the set of disklocs in our collection to use later.
- set<RecordId> data;
- getLocs(&data, coll);
-
- // We're making an assumption here that happens to be true because we clear out the
- // collection before running this: increasing inserts have increasing RecordIds.
- // This isn't true in general if the collection is not dropped beforehand.
+ // Insert a bunch of data
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << 1 << "bar" << 1));
+ }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
+
+ // Scan over foo == 1
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 1);
+ params.bounds.endKey = BSON("" << 1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Scan over bar == 1
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Get the set of RecordIds in our collection to use later.
+ set<RecordId> data;
+ getLocs(&data, coll);
+
+ // We're making an assumption here that happens to be true because we clear out the
+ // collection before running this: increasing inserts have increasing RecordIds.
+ // This isn't true in general if the collection is not dropped beforehand.
+ WorkingSetID id = WorkingSet::INVALID_ID;
+
+ // Sorted AND looks at the first child, which is an index scan over foo==1.
+ ah->work(&id);
+
+ // Because RecordIds increase with insertion order here, the first thing the index
+ // scan returns is the very first insert, i.e. the first element of 'data'. Let's
+ // invalidate it and make sure it shows up in the flagged results.
+ ah->saveState();
+ ah->invalidate(&_txn, *data.begin(), INVALIDATION_DELETION);
+ remove(coll->docFor(&_txn, *data.begin()).value());
+ ah->restoreState(&_txn);
+
+ // Make sure the nuked obj is actually in the flagged data.
+ ASSERT_EQUALS(ws.getFlagged().size(), size_t(1));
+ WorkingSetMember* member = ws.get(*ws.getFlagged().begin());
+ ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state);
+ BSONElement elt;
+ ASSERT_TRUE(member->getFieldDotted("foo", &elt));
+ ASSERT_EQUALS(1, elt.numberInt());
+ ASSERT_TRUE(member->getFieldDotted("bar", &elt));
+ ASSERT_EQUALS(1, elt.numberInt());
+
+ set<RecordId>::iterator it = data.begin();
+
+ // Proceed along, AND-ing results.
+ int count = 0;
+ while (!ah->isEOF() && count < 10) {
WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ah->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
+ }
+
+ ++count;
+ ++it;
+ member = ws.get(id);
- // Sorted AND looks at the first child, which is an index scan over foo==1.
- ah->work(&id);
-
- // The first thing that the index scan returns (due to increasing RecordId trick) is the
- // very first insert, which should be the very first thing in data. Let's invalidate it
- // and make sure it shows up in the flagged results.
- ah->saveState();
- ah->invalidate(&_txn, *data.begin(), INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *data.begin()).value());
- ah->restoreState(&_txn);
-
- // Make sure the nuked obj is actually in the flagged data.
- ASSERT_EQUALS(ws.getFlagged().size(), size_t(1));
- WorkingSetMember* member = ws.get(*ws.getFlagged().begin());
- ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state);
- BSONElement elt;
ASSERT_TRUE(member->getFieldDotted("foo", &elt));
ASSERT_EQUALS(1, elt.numberInt());
ASSERT_TRUE(member->getFieldDotted("bar", &elt));
ASSERT_EQUALS(1, elt.numberInt());
+ ASSERT_EQUALS(member->loc, *it);
+ }
- set<RecordId>::iterator it = data.begin();
-
- // Proceed along, AND-ing results.
- int count = 0;
- while (!ah->isEOF() && count < 10) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ah->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
-
- ++count;
- ++it;
- member = ws.get(id);
-
- ASSERT_TRUE(member->getFieldDotted("foo", &elt));
- ASSERT_EQUALS(1, elt.numberInt());
- ASSERT_TRUE(member->getFieldDotted("bar", &elt));
- ASSERT_EQUALS(1, elt.numberInt());
- ASSERT_EQUALS(member->loc, *it);
- }
-
- // Move 'it' to a result that's yet to show up.
- for (int i = 0; i < count + 10; ++i) { ++it; }
- // Remove a result that's coming up. It's not the 'target' result of the AND so it's
- // not flagged.
- ah->saveState();
- ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *it).value());
- ah->restoreState(&_txn);
-
- // Get all results aside from the two we killed.
- while (!ah->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ah->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
-
- ++count;
- member = ws.get(id);
-
- ASSERT_TRUE(member->getFieldDotted("foo", &elt));
- ASSERT_EQUALS(1, elt.numberInt());
- ASSERT_TRUE(member->getFieldDotted("bar", &elt));
- ASSERT_EQUALS(1, elt.numberInt());
+ // Move 'it' to a result that's yet to show up.
+ for (int i = 0; i < count + 10; ++i) {
+ ++it;
+ }
+ // Remove a result that's coming up. It's not the 'target' result of the AND so it's
+ // not flagged.
+ ah->saveState();
+ ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
+ remove(coll->docFor(&_txn, *it).value());
+ ah->restoreState(&_txn);
+
+ // Get all results aside from the two we killed.
+ while (!ah->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ah->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
- ASSERT_EQUALS(count, 48);
+ ++count;
+ member = ws.get(id);
- ASSERT_EQUALS(size_t(1), ws.getFlagged().size());
+ ASSERT_TRUE(member->getFieldDotted("foo", &elt));
+ ASSERT_EQUALS(1, elt.numberInt());
+ ASSERT_TRUE(member->getFieldDotted("bar", &elt));
+ ASSERT_EQUALS(1, elt.numberInt());
}
- };
-
-
- // An AND with three children.
- class QueryStageAndSortedThreeLeaf : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- // Insert a bunch of data
- for (int i = 0; i < 50; ++i) {
- // Some data that'll show up but not be in all.
- insert(BSON("foo" << 1 << "baz" << 1));
- insert(BSON("foo" << 1 << "bar" << 1));
- // The needle in the haystack. Only these should be returned by the AND.
- insert(BSON("foo" << 1 << "bar" << 1 << "baz" << 1));
- insert(BSON("foo" << 1));
- insert(BSON("bar" << 1));
- insert(BSON("baz" << 1));
- }
+ ASSERT_EQUALS(count, 48);
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
- addIndex(BSON("baz" << 1));
+ ASSERT_EQUALS(size_t(1), ws.getFlagged().size());
+ }
+};
- WorkingSet ws;
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
- // Scan over foo == 1
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 1);
- params.bounds.endKey = BSON("" << 1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+// An AND with three children.
+class QueryStageAndSortedThreeLeaf : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- // bar == 1
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ // Insert a bunch of data
+ for (int i = 0; i < 50; ++i) {
+ // Some docs that will match one or two of the scans, but not all three.
+ insert(BSON("foo" << 1 << "baz" << 1));
+ insert(BSON("foo" << 1 << "bar" << 1));
+ // The needle in the haystack. Only these should be returned by the AND.
+ insert(BSON("foo" << 1 << "bar" << 1 << "baz" << 1));
+ insert(BSON("foo" << 1));
+ insert(BSON("bar" << 1));
+ insert(BSON("baz" << 1));
+ }
- // baz == 1
- params.descriptor = getIndex(BSON("baz" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+ addIndex(BSON("baz" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
+
+ // Scan over foo == 1
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 1);
+ params.bounds.endKey = BSON("" << 1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // bar == 1
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // baz == 1
+ params.descriptor = getIndex(BSON("baz" << 1), coll);
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
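+ // Only the 50 "needle" docs have all three fields, so the AND returns exactly 50.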
+ ASSERT_EQUALS(50, countResults(ah.get()));
+ }
+};
+
+// An AND with an index scan that returns nothing.
+class QueryStageAndSortedWithNothing : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- ASSERT_EQUALS(50, countResults(ah.get()));
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << 8 << "bar" << 20));
}
- };
-
- // An AND with an index scan that returns nothing.
- class QueryStageAndSortedWithNothing : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << 8 << "bar" << 20));
- }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
+
+ // Foo == 7. Should be EOF.
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 7);
+ params.bounds.endKey = BSON("" << 7);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Bar == 20, not EOF.
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSON("" << 20);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
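+ // The foo == 7 child is EOF immediately, so the sorted AND produces nothing.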
+ ASSERT_EQUALS(0, countResults(ah.get()));
+ }
+};
+
+// An AND that scans data but returns nothing.
+class QueryStageAndSortedProducesNothing : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
-
- // Foo == 7. Should be EOF.
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 7);
- params.bounds.endKey = BSON("" << 7);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Bar == 20, not EOF.
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSON("" << 20);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- ASSERT_EQUALS(0, countResults(ah.get()));
+ for (int i = 0; i < 50; ++i) {
+ // Insert docs with foo == 7 and docs with bar == 20, but none with both.
+ insert(BSON("foo" << 8 << "bar" << 20));
+ insert(BSON("foo" << 7 << "bar" << 21));
+ insert(BSON("foo" << 7));
+ insert(BSON("bar" << 20));
}
- };
-
- // An AND that scans data but returns nothing.
- class QueryStageAndSortedProducesNothing : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- // Insert data with foo=7, bar==20, but nothing with both.
- insert(BSON("foo" << 8 << "bar" << 20));
- insert(BSON("foo" << 7 << "bar" << 21));
- insert(BSON("foo" << 7));
- insert(BSON("bar" << 20));
- }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
+
+ // foo == 7.
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 7);
+ params.bounds.endKey = BSON("" << 7);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // bar == 20.
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSON("" << 20);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
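+ // foo == 7 and bar == 20 each match some docs, but never the same doc, so the AND is empty.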
+ ASSERT_EQUALS(0, countResults(ah.get()));
+ }
+};
+
+// An AND that would return data but the matcher prevents it.
+class QueryStageAndSortedWithMatcher : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
-
- // foo == 7.
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 7);
- params.bounds.endKey = BSON("" << 7);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // bar == 20.
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSON("" << 20);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- ASSERT_EQUALS(0, countResults(ah.get()));
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << 1 << "bar" << 1));
}
- };
-
- // An AND that would return data but the matcher prevents it.
- class QueryStageAndSortedWithMatcher : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << 1 << "bar" << 1));
- }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ BSONObj filterObj = BSON("foo" << BSON("$ne" << 1));
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
+ verify(swme.isOK());
+ auto_ptr<MatchExpression> filterExpr(swme.getValue());
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, filterExpr.get(), coll));
+
+ // Scan over foo == 1
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 1);
+ params.bounds.endKey = BSON("" << 1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // bar == 1
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // Filter drops everything.
+ ASSERT_EQUALS(0, countResults(ah.get()));
+ }
+};
+
+// Verify that AND preserves the order of the last child.
+class QueryStageAndSortedByLastChild : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- BSONObj filterObj = BSON("foo" << BSON("$ne" << 1));
- StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
- verify(swme.isOK());
- auto_ptr<MatchExpression> filterExpr(swme.getValue());
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, filterExpr.get(), coll));
-
- // Scan over foo == 1
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 1);
- params.bounds.endKey = BSON("" << 1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // bar == 1
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Filter drops everything.
- ASSERT_EQUALS(0, countResults(ah.get()));
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << 1 << "bar" << i));
}
- };
-
- // Verify that AND preserves the order of the last child.
- class QueryStageAndSortedByLastChild : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << 1 << "bar" << i));
- }
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
-
- // Scan over foo == 1
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 1);
- params.bounds.endKey = BSON("" << 1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // Intersect with 7 <= bar < 10000
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- params.bounds.startKey = BSON("" << 7);
- params.bounds.endKey = BSON("" << 10000);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- WorkingSetID lastId = WorkingSet::INVALID_ID;
-
- int count = 0;
- while (!ah->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ah->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
- BSONObj thisObj = coll->docFor(&_txn, ws.get(id)->loc).value();
- ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt());
- ++count;
- if (WorkingSet::INVALID_ID != lastId) {
- BSONObj lastObj = coll->docFor(&_txn, ws.get(lastId)->loc).value();
- ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0);
- }
- lastId = id;
- }
+ WorkingSet ws;
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
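+ // Note: AND_HASH buffers every child except the last, so results come back in the last child's order.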
- ASSERT_EQUALS(count, 43);
- }
- };
+ // Scan over foo == 1
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 1);
+ params.bounds.endKey = BSON("" << 1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
- /**
- * SERVER-14607: Check that sort-based intersection works when the first
- * child returns fetched docs but the second child returns index keys.
- */
- class QueryStageAndSortedFirstChildFetched : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+ // Intersect with 7 <= bar <= 10000 (endKeyInclusive is still true from above).
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ params.bounds.startKey = BSON("" << 7);
+ params.bounds.endKey = BSON("" << 10000);
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
- // Insert a bunch of data
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << 1 << "bar" << 1));
- }
+ WorkingSetID lastId = WorkingSet::INVALID_ID;
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndSortedStage> as(new AndSortedStage(&ws, NULL, coll));
-
- // Scan over foo == 1
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 1);
- params.bounds.endKey = BSON("" << 1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL);
-
- // First child of the AND_SORTED stage is a Fetch. The NULL in the
- // constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll);
- as->addChild(fetch);
-
- // bar == 1
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- as->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- for (int i = 0; i < 50; i++) {
- BSONObj obj = getNext(as.get(), &ws);
- ASSERT_EQUALS(1, obj["foo"].numberInt());
- ASSERT_EQUALS(1, obj["bar"].numberInt());
+ int count = 0;
+ while (!ah->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ah->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
+ }
+ BSONObj thisObj = coll->docFor(&_txn, ws.get(id)->loc).value();
+ ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt());
+ ++count;
+ if (WorkingSet::INVALID_ID != lastId) {
+ BSONObj lastObj = coll->docFor(&_txn, ws.get(lastId)->loc).value();
+ ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0);
}
+ lastId = id;
}
- };
- /**
- * SERVER-14607: Check that sort-based intersection works when the first
- * child returns index keys but the second returns fetched docs.
- */
- class QueryStageAndSortedSecondChildFetched : public QueryStageAndBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = ctx.getCollection();
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+ ASSERT_EQUALS(count, 43);
+ }
+};
- // Insert a bunch of data
- for (int i = 0; i < 50; ++i) {
- insert(BSON("foo" << 1 << "bar" << 1));
- }
+/**
+ * SERVER-14607: Check that sort-based intersection works when the first
+ * child returns fetched docs but the second child returns index keys.
+ */
+class QueryStageAndSortedFirstChildFetched : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- addIndex(BSON("foo" << 1));
- addIndex(BSON("bar" << 1));
-
- WorkingSet ws;
- scoped_ptr<AndSortedStage> as(new AndSortedStage(&ws, NULL, coll));
-
- // Scan over foo == 1
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1), coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 1);
- params.bounds.endKey = BSON("" << 1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- as->addChild(new IndexScan(&_txn, params, &ws, NULL));
-
- // bar == 1
- params.descriptor = getIndex(BSON("bar" << 1), coll);
- IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL);
-
- // Second child of the AND_SORTED stage is a Fetch. The NULL in the
- // constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll);
- as->addChild(fetch);
-
- for (int i = 0; i < 50; i++) {
- BSONObj obj = getNext(as.get(), &ws);
- ASSERT_EQUALS(1, obj["foo"].numberInt());
- ASSERT_EQUALS(1, obj["bar"].numberInt());
- }
+ // Insert a bunch of data
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << 1 << "bar" << 1));
}
- };
-
-
- class All : public Suite {
- public:
- All() : Suite( "query_stage_and" ) { }
-
- void setupTests() {
- add<QueryStageAndHashInvalidation>();
- add<QueryStageAndHashTwoLeaf>();
- add<QueryStageAndHashTwoLeafFirstChildLargeKeys>();
- add<QueryStageAndHashTwoLeafLastChildLargeKeys>();
- add<QueryStageAndHashThreeLeaf>();
- add<QueryStageAndHashThreeLeafMiddleChildLargeKeys>();
- add<QueryStageAndHashWithNothing>();
- add<QueryStageAndHashProducesNothing>();
- add<QueryStageAndHashWithMatcher>();
- add<QueryStageAndHashInvalidateLookahead>();
- add<QueryStageAndHashFirstChildFetched>();
- add<QueryStageAndHashSecondChildFetched>();
- add<QueryStageAndSortedInvalidation>();
- add<QueryStageAndSortedThreeLeaf>();
- add<QueryStageAndSortedWithNothing>();
- add<QueryStageAndSortedProducesNothing>();
- add<QueryStageAndSortedWithMatcher>();
- add<QueryStageAndSortedByLastChild>();
- add<QueryStageAndSortedFirstChildFetched>();
- add<QueryStageAndSortedSecondChildFetched>();
+
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndSortedStage> as(new AndSortedStage(&ws, NULL, coll));
+
+ // Scan over foo == 1
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 1);
+ params.bounds.endKey = BSON("" << 1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL);
+
+ // First child of the AND_SORTED stage is a Fetch. The NULL in the
+ // constructor means there is no filter.
+ FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll);
+ as->addChild(fetch);
+
+ // bar == 1
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ as->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ for (int i = 0; i < 50; i++) {
+ BSONObj obj = getNext(as.get(), &ws);
+ ASSERT_EQUALS(1, obj["foo"].numberInt());
+ ASSERT_EQUALS(1, obj["bar"].numberInt());
}
- };
+ }
+};
- SuiteInstance<All> queryStageAndAll;
+/**
+ * SERVER-14607: Check that sort-based intersection works when the first
+ * child returns index keys but the second returns fetched docs.
+ */
+class QueryStageAndSortedSecondChildFetched : public QueryStageAndBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
+
+ // Insert a bunch of data
+ for (int i = 0; i < 50; ++i) {
+ insert(BSON("foo" << 1 << "bar" << 1));
+ }
+
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("bar" << 1));
+
+ WorkingSet ws;
+ scoped_ptr<AndSortedStage> as(new AndSortedStage(&ws, NULL, coll));
+
+ // Scan over foo == 1
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1), coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 1);
+ params.bounds.endKey = BSON("" << 1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ as->addChild(new IndexScan(&_txn, params, &ws, NULL));
+
+ // bar == 1
+ params.descriptor = getIndex(BSON("bar" << 1), coll);
+ IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL);
+
+ // Second child of the AND_SORTED stage is a Fetch. The NULL in the
+ // constructor means there is no filter.
+ FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll);
+ as->addChild(fetch);
+
+ for (int i = 0; i < 50; i++) {
+ BSONObj obj = getNext(as.get(), &ws);
+ ASSERT_EQUALS(1, obj["foo"].numberInt());
+ ASSERT_EQUALS(1, obj["bar"].numberInt());
+ }
+ }
+};
+
+
+class All : public Suite {
+public:
+ All() : Suite("query_stage_and") {}
+
+ void setupTests() {
+ add<QueryStageAndHashInvalidation>();
+ add<QueryStageAndHashTwoLeaf>();
+ add<QueryStageAndHashTwoLeafFirstChildLargeKeys>();
+ add<QueryStageAndHashTwoLeafLastChildLargeKeys>();
+ add<QueryStageAndHashThreeLeaf>();
+ add<QueryStageAndHashThreeLeafMiddleChildLargeKeys>();
+ add<QueryStageAndHashWithNothing>();
+ add<QueryStageAndHashProducesNothing>();
+ add<QueryStageAndHashWithMatcher>();
+ add<QueryStageAndHashInvalidateLookahead>();
+ add<QueryStageAndHashFirstChildFetched>();
+ add<QueryStageAndHashSecondChildFetched>();
+ add<QueryStageAndSortedInvalidation>();
+ add<QueryStageAndSortedThreeLeaf>();
+ add<QueryStageAndSortedWithNothing>();
+ add<QueryStageAndSortedProducesNothing>();
+ add<QueryStageAndSortedWithMatcher>();
+ add<QueryStageAndSortedByLastChild>();
+ add<QueryStageAndSortedFirstChildFetched>();
+ add<QueryStageAndSortedSecondChildFetched>();
+ }
+};
+
+SuiteInstance<All> queryStageAndAll;
} // namespace QueryStageAnd
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index a6c07a65b7a..cdfb0f2dfaf 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -50,224 +50,236 @@
namespace QueryStageCachedPlan {
- class QueryStageCachedPlanBase {
- public:
- QueryStageCachedPlanBase() {
- // If collection exists already, we need to drop it.
- dropCollection();
-
- // Add indices.
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- Client::WriteContext ctx(&_txn, ns());
- Collection* collection = ctx.getCollection();
- ASSERT(collection);
-
- // Add data.
- for (int i = 0; i < 10; i++) {
- insertDocument(collection, BSON("_id" << i << "a" << i << "b" << 1));
- }
+class QueryStageCachedPlanBase {
+public:
+ QueryStageCachedPlanBase() {
+ // If collection exists already, we need to drop it.
+ dropCollection();
+
+ // Add indices.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ Client::WriteContext ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
+ ASSERT(collection);
+
+ // Add data.
+ for (int i = 0; i < 10; i++) {
+ insertDocument(collection, BSON("_id" << i << "a" << i << "b" << 1));
}
-
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
+
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
+
+ void dropCollection() {
+ const NamespaceString nsString(ns());
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::DBLock dbLock(_txn.lockState(), nsString.db(), MODE_X);
+ Database* database = dbHolder().get(&_txn, nsString.db());
+ if (!database) {
+ return;
}
- void dropCollection() {
- const NamespaceString nsString(ns());
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::DBLock dbLock(_txn.lockState(), nsString.db(), MODE_X);
- Database* database = dbHolder().get(&_txn, nsString.db());
- if (!database) {
- return;
- }
+ WriteUnitOfWork wuow(&_txn);
+ database->dropCollection(&_txn, ns());
+ wuow.commit();
+ }
- WriteUnitOfWork wuow(&_txn);
- database->dropCollection(&_txn, ns());
- wuow.commit();
- }
+ void insertDocument(Collection* collection, BSONObj obj) {
+ WriteUnitOfWork wuow(&_txn);
- void insertDocument(Collection* collection, BSONObj obj) {
- WriteUnitOfWork wuow(&_txn);
+ const bool enforceQuota = false;
+ StatusWith<RecordId> res = collection->insertDocument(&_txn, obj, enforceQuota);
+ ASSERT(res.isOK());
- const bool enforceQuota = false;
- StatusWith<RecordId> res = collection->insertDocument(&_txn, obj, enforceQuota);
- ASSERT(res.isOK());
+ wuow.commit();
+ }
- wuow.commit();
- }
+ static void resetEvictionEnabled(bool resetTo) {
+ internalQueryCacheReplanningEnabled = resetTo;
+ }
- static void resetEvictionEnabled(bool resetTo) {
- internalQueryCacheReplanningEnabled = resetTo;
- }
+ static const char* ns() {
+ return "unittests.QueryStageCachedPlan";
+ }
- static const char* ns() {
- return "unittests.QueryStageCachedPlan";
- }
+protected:
+ OperationContextImpl _txn;
+ WorkingSet _ws;
+};
- protected:
- OperationContextImpl _txn;
- WorkingSet _ws;
- };
-
- /**
- * Test that on failure, the cached plan stage replans the query but does not create a new cache
- * entry.
- */
- class QueryStageCachedPlanFailure : public QueryStageCachedPlanBase {
- public:
- void run() {
- bool oldReplanningFlagValue = internalQueryCacheReplanningEnabled;
- internalQueryCacheReplanningEnabled = true;
- ScopeGuard flagResetter = MakeGuard(&QueryStageCachedPlanBase::resetEvictionEnabled,
- oldReplanningFlagValue);
-
- AutoGetCollectionForRead ctx(&_txn, ns());
- Collection* collection = ctx.getCollection();
- ASSERT(collection);
-
- // Query can be answered by either index on "a" or index on "b".
- CanonicalQuery* rawCq;
- ASSERT_OK(CanonicalQuery::canonicalize(ns(), fromjson("{a: {$gte: 8}, b: 1}"), &rawCq));
- boost::scoped_ptr<CanonicalQuery> cq(rawCq);
-
- // We shouldn't have anything in the plan cache for this shape yet.
- PlanCache* cache = collection->infoCache()->getPlanCache();
- ASSERT(cache);
- CachedSolution* rawCachedSolution;
- ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
-
- // Get planner params.
- QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
-
- // Queued data stage will return a failure during the cached plan trial period.
- std::auto_ptr<QueuedDataStage> mockChild(new QueuedDataStage(&_ws));
- mockChild->pushBack(PlanStage::FAILURE);
-
- // High enough so that we shouldn't trigger a replan based on works.
- const size_t decisionWorks = 50;
- CachedPlanStage cachedPlanStage(&_txn, collection, &_ws, cq.get(), plannerParams,
- decisionWorks, mockChild.release(), NULL);
-
- // This should succeed after triggering a replan.
- ASSERT_OK(cachedPlanStage.pickBestPlan(NULL));
-
- // Make sure that we get 2 legit results back.
- size_t numResults = 0;
- PlanStage::StageState state = PlanStage::NEED_TIME;
- while (state != PlanStage::IS_EOF) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- state = cachedPlanStage.work(&id);
-
- ASSERT_NE(state, PlanStage::FAILURE);
- ASSERT_NE(state, PlanStage::DEAD);
-
- if (state == PlanStage::ADVANCED) {
- WorkingSetMember* member = _ws.get(id);
- ASSERT(cq->root()->matchesBSON(member->obj.value()));
- numResults++;
- }
+/**
+ * Test that on failure, the cached plan stage replans the query but does not create a new cache
+ * entry.
+ */
+class QueryStageCachedPlanFailure : public QueryStageCachedPlanBase {
+public:
+ void run() {
+ bool oldReplanningFlagValue = internalQueryCacheReplanningEnabled;
+ internalQueryCacheReplanningEnabled = true;
+ ScopeGuard flagResetter =
+ MakeGuard(&QueryStageCachedPlanBase::resetEvictionEnabled, oldReplanningFlagValue);
+
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
+ ASSERT(collection);
+
+ // Query can be answered by either index on "a" or index on "b".
+ CanonicalQuery* rawCq;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns(), fromjson("{a: {$gte: 8}, b: 1}"), &rawCq));
+ boost::scoped_ptr<CanonicalQuery> cq(rawCq);
+
+ // We shouldn't have anything in the plan cache for this shape yet.
+ PlanCache* cache = collection->infoCache()->getPlanCache();
+ ASSERT(cache);
+ CachedSolution* rawCachedSolution;
+ ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
+
+ // Get planner params.
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+
+ // Queued data stage will return a failure during the cached plan trial period.
+ std::auto_ptr<QueuedDataStage> mockChild(new QueuedDataStage(&_ws));
+ mockChild->pushBack(PlanStage::FAILURE);
+
+ // High enough so that we shouldn't trigger a replan based on works.
+ const size_t decisionWorks = 50;
+ CachedPlanStage cachedPlanStage(&_txn,
+ collection,
+ &_ws,
+ cq.get(),
+ plannerParams,
+ decisionWorks,
+ mockChild.release(),
+ NULL);
+
+ // This should succeed after triggering a replan.
+ ASSERT_OK(cachedPlanStage.pickBestPlan(NULL));
+
+ // Make sure that we get 2 legit results back.
+ size_t numResults = 0;
+ PlanStage::StageState state = PlanStage::NEED_TIME;
+ while (state != PlanStage::IS_EOF) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ state = cachedPlanStage.work(&id);
+
+ ASSERT_NE(state, PlanStage::FAILURE);
+ ASSERT_NE(state, PlanStage::DEAD);
+
+ if (state == PlanStage::ADVANCED) {
+ WorkingSetMember* member = _ws.get(id);
+ ASSERT(cq->root()->matchesBSON(member->obj.value()));
+ numResults++;
}
-
- ASSERT_EQ(numResults, 2U);
-
- // Plan cache should still be empty, as we don't write to it when we replan a failed
- // query.
- ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
-
- resetEvictionEnabled(oldReplanningFlagValue);
- flagResetter.Dismiss();
}
- };
-
- /**
- * Test that hitting the cached plan stage trial period's threshold for work cycles causes the
- * query to be replanned. Also verify that the replanning results in a new plan cache entry.
- */
- class QueryStageCachedPlanHitMaxWorks : public QueryStageCachedPlanBase {
- public:
- void run() {
- bool oldReplanningFlagValue = internalQueryCacheReplanningEnabled;
- internalQueryCacheReplanningEnabled = true;
- ScopeGuard flagResetter = MakeGuard(&QueryStageCachedPlanBase::resetEvictionEnabled,
- oldReplanningFlagValue);
-
- AutoGetCollectionForRead ctx(&_txn, ns());
- Collection* collection = ctx.getCollection();
- ASSERT(collection);
-
- // Query can be answered by either index on "a" or index on "b".
- CanonicalQuery* rawCq;
- ASSERT_OK(CanonicalQuery::canonicalize(ns(), fromjson("{a: {$gte: 8}, b: 1}"), &rawCq));
- boost::scoped_ptr<CanonicalQuery> cq(rawCq);
-
- // We shouldn't have anything in the plan cache for this shape yet.
- PlanCache* cache = collection->infoCache()->getPlanCache();
- ASSERT(cache);
- CachedSolution* rawCachedSolution;
- ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
-
- // Get planner params.
- QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
-
- // Set up queued data stage to take a long time before returning EOF. Should be long
- // enough to trigger a replan.
- const size_t decisionWorks = 10;
- const size_t mockWorks = 1U + static_cast<size_t>(internalQueryCacheEvictionRatio
- * decisionWorks);
- std::auto_ptr<QueuedDataStage> mockChild(new QueuedDataStage(&_ws));
- for (size_t i = 0; i < mockWorks; i++) {
- mockChild->pushBack(PlanStage::NEED_TIME);
- }
- CachedPlanStage cachedPlanStage(&_txn, collection, &_ws, cq.get(), plannerParams,
- decisionWorks, mockChild.release(), NULL);
+ ASSERT_EQ(numResults, 2U);
- // This should succeed after triggering a replan.
- ASSERT_OK(cachedPlanStage.pickBestPlan(NULL));
+ // Plan cache should still be empty, as we don't write to it when we replan a failed
+ // query.
+ ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
- // Make sure that we get 2 legit results back.
- size_t numResults = 0;
- PlanStage::StageState state = PlanStage::NEED_TIME;
- while (state != PlanStage::IS_EOF) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- state = cachedPlanStage.work(&id);
+ resetEvictionEnabled(oldReplanningFlagValue);
+ flagResetter.Dismiss();
+ }
+};
- ASSERT_NE(state, PlanStage::FAILURE);
- ASSERT_NE(state, PlanStage::DEAD);
+/**
+ * Test that hitting the cached plan stage trial period's threshold for work cycles causes the
+ * query to be replanned. Also verify that the replanning results in a new plan cache entry.
+ */
+class QueryStageCachedPlanHitMaxWorks : public QueryStageCachedPlanBase {
+public:
+ void run() {
+ bool oldReplanningFlagValue = internalQueryCacheReplanningEnabled;
+ internalQueryCacheReplanningEnabled = true;
+ ScopeGuard flagResetter =
+ MakeGuard(&QueryStageCachedPlanBase::resetEvictionEnabled, oldReplanningFlagValue);
+
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
+ ASSERT(collection);
+
+ // Query can be answered by either index on "a" or index on "b".
+ CanonicalQuery* rawCq;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns(), fromjson("{a: {$gte: 8}, b: 1}"), &rawCq));
+ boost::scoped_ptr<CanonicalQuery> cq(rawCq);
+
+ // We shouldn't have anything in the plan cache for this shape yet.
+ PlanCache* cache = collection->infoCache()->getPlanCache();
+ ASSERT(cache);
+ CachedSolution* rawCachedSolution;
+ ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
+
+ // Get planner params.
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+
+ // Set up queued data stage to take a long time before returning EOF. Should be long
+ // enough to trigger a replan.
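+        // The trial budget is internalQueryCacheEvictionRatio * decisionWorks; one extra
+        // NEED_TIME work pushes past it (with the default ratio of 10.0 -- an assumption, the
+        // knob is tunable -- that is 101 works against a budget of 100).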
+ const size_t decisionWorks = 10;
+ const size_t mockWorks =
+ 1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
+ std::auto_ptr<QueuedDataStage> mockChild(new QueuedDataStage(&_ws));
+ for (size_t i = 0; i < mockWorks; i++) {
+ mockChild->pushBack(PlanStage::NEED_TIME);
+ }
- if (state == PlanStage::ADVANCED) {
- WorkingSetMember* member = _ws.get(id);
- ASSERT(cq->root()->matchesBSON(member->obj.value()));
- numResults++;
- }
+ CachedPlanStage cachedPlanStage(&_txn,
+ collection,
+ &_ws,
+ cq.get(),
+ plannerParams,
+ decisionWorks,
+ mockChild.release(),
+ NULL);
+
+ // This should succeed after triggering a replan.
+ ASSERT_OK(cachedPlanStage.pickBestPlan(NULL));
+
+ // Make sure that we get 2 legit results back.
+ size_t numResults = 0;
+ PlanStage::StageState state = PlanStage::NEED_TIME;
+ while (state != PlanStage::IS_EOF) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ state = cachedPlanStage.work(&id);
+
+ ASSERT_NE(state, PlanStage::FAILURE);
+ ASSERT_NE(state, PlanStage::DEAD);
+
+ if (state == PlanStage::ADVANCED) {
+ WorkingSetMember* member = _ws.get(id);
+ ASSERT(cq->root()->matchesBSON(member->obj.value()));
+ numResults++;
}
+ }
- ASSERT_EQ(numResults, 2U);
+ ASSERT_EQ(numResults, 2U);
- // This time we expect to find something in the plan cache. Replans after hitting the
- // works threshold result in a cache entry.
- ASSERT_OK(cache->get(*cq, &rawCachedSolution));
- boost::scoped_ptr<CachedSolution> cachedSolution(rawCachedSolution);
+ // This time we expect to find something in the plan cache. Replans after hitting the
+ // works threshold result in a cache entry.
+ ASSERT_OK(cache->get(*cq, &rawCachedSolution));
+ boost::scoped_ptr<CachedSolution> cachedSolution(rawCachedSolution);
- resetEvictionEnabled(oldReplanningFlagValue);
- flagResetter.Dismiss();
- }
- };
+ resetEvictionEnabled(oldReplanningFlagValue);
+ flagResetter.Dismiss();
+ }
+};
- class All : public Suite {
- public:
- All() : Suite("query_stage_cached_plan") {}
+class All : public Suite {
+public:
+ All() : Suite("query_stage_cached_plan") {}
- void setupTests() {
- add<QueryStageCachedPlanFailure>();
- add<QueryStageCachedPlanHitMaxWorks>();
- }
- };
+ void setupTests() {
+ add<QueryStageCachedPlanFailure>();
+ add<QueryStageCachedPlanHitMaxWorks>();
+ }
+};
- SuiteInstance<All> all;
+SuiteInstance<All> all;
-} // namespace QueryStageCachedPlan
+} // namespace QueryStageCachedPlan
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 80ac05acd0c..d98d87d7643 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -48,353 +48,358 @@
namespace QueryStageCollectionScan {
- using boost::scoped_ptr;
- using std::auto_ptr;
- using std::vector;
-
- //
- // Stage-specific tests.
- //
-
- class QueryStageCollectionScanBase {
- public:
- QueryStageCollectionScanBase() : _client(&_txn) {
- Client::WriteContext ctx(&_txn, ns());
-
- for (int i = 0; i < numObj(); ++i) {
- BSONObjBuilder bob;
- bob.append("foo", i);
- _client.insert(ns(), bob.obj());
- }
- }
-
- virtual ~QueryStageCollectionScanBase() {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
- }
-
- void remove(const BSONObj& obj) {
- _client.remove(ns(), obj);
+using boost::scoped_ptr;
+using std::auto_ptr;
+using std::vector;
+
+//
+// Stage-specific tests.
+//
+
+class QueryStageCollectionScanBase {
+public:
+ QueryStageCollectionScanBase() : _client(&_txn) {
+ Client::WriteContext ctx(&_txn, ns());
+
+ for (int i = 0; i < numObj(); ++i) {
+ BSONObjBuilder bob;
+ bob.append("foo", i);
+ _client.insert(ns(), bob.obj());
}
-
- int countResults(CollectionScanParams::Direction direction, const BSONObj& filterObj) {
- AutoGetCollectionForRead ctx(&_txn, ns());
-
- // Configure the scan.
- CollectionScanParams params;
- params.collection = ctx.getCollection();
- params.direction = direction;
- params.tailable = false;
-
- // Make the filter.
- StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
- verify(swme.isOK());
- auto_ptr<MatchExpression> filterExpr(swme.getValue());
-
- // Make a scan and have the runner own it.
- WorkingSet* ws = new WorkingSet();
- PlanStage* ps = new CollectionScan(&_txn, params, ws, filterExpr.get());
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws, ps, params.collection,
- PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- // Use the runner to count the number of objects scanned.
- int count = 0;
- for (BSONObj obj; PlanExecutor::ADVANCED == exec->getNext(&obj, NULL); ) { ++count; }
- return count;
+ }
+
+ virtual ~QueryStageCollectionScanBase() {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
+ }
+
+ void remove(const BSONObj& obj) {
+ _client.remove(ns(), obj);
+ }
+
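+    // Runs a filtered collection scan in the given direction and returns the number of
+    // documents that match 'filterObj'.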
+ int countResults(CollectionScanParams::Direction direction, const BSONObj& filterObj) {
+ AutoGetCollectionForRead ctx(&_txn, ns());
+
+ // Configure the scan.
+ CollectionScanParams params;
+ params.collection = ctx.getCollection();
+ params.direction = direction;
+ params.tailable = false;
+
+ // Make the filter.
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
+ verify(swme.isOK());
+ auto_ptr<MatchExpression> filterExpr(swme.getValue());
+
+ // Make a scan and have the runner own it.
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* ps = new CollectionScan(&_txn, params, ws, filterExpr.get());
+
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(
+ &_txn, ws, ps, params.collection, PlanExecutor::YIELD_MANUAL, &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ // Use the runner to count the number of objects scanned.
+ int count = 0;
+ for (BSONObj obj; PlanExecutor::ADVANCED == exec->getNext(&obj, NULL);) {
+ ++count;
}
-
- void getLocs(Collection* collection,
- CollectionScanParams::Direction direction,
- vector<RecordId>* out) {
- WorkingSet ws;
-
- CollectionScanParams params;
- params.collection = collection;
- params.direction = direction;
- params.tailable = false;
-
- scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
- while (!scan->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = scan->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = ws.get(id);
- verify(member->hasLoc());
- out->push_back(member->loc);
- }
+ return count;
+ }
+
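+    // Fills 'out' with the RecordIds of every document, in scan order for 'direction'.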
+ void getLocs(Collection* collection,
+ CollectionScanParams::Direction direction,
+ vector<RecordId>* out) {
+ WorkingSet ws;
+
+ CollectionScanParams params;
+ params.collection = collection;
+ params.direction = direction;
+ params.tailable = false;
+
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ while (!scan->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ verify(member->hasLoc());
+ out->push_back(member->loc);
}
}
+ }
+
+ static int numObj() {
+ return 50;
+ }
+
+ static const char* ns() {
+ return "unittests.QueryStageCollectionScan";
+ }
+
+protected:
+ OperationContextImpl _txn;
+
+private:
+ DBDirectClient _client;
+};
+
+
+//
+// Go forwards, get everything.
+//
+class QueryStageCollscanBasicForward : public QueryStageCollectionScanBase {
+public:
+ void run() {
+ ASSERT_EQUALS(numObj(), countResults(CollectionScanParams::FORWARD, BSONObj()));
+ }
+};
+
+//
+// Go backwards, get everything.
+//
+
+class QueryStageCollscanBasicBackward : public QueryStageCollectionScanBase {
+public:
+ void run() {
+ ASSERT_EQUALS(numObj(), countResults(CollectionScanParams::BACKWARD, BSONObj()));
+ }
+};
+
+//
+// Go forwards and match half the docs.
+//
+
+class QueryStageCollscanBasicForwardWithMatch : public QueryStageCollectionScanBase {
+public:
+ void run() {
+ BSONObj obj = BSON("foo" << BSON("$lt" << 25));
+ ASSERT_EQUALS(25, countResults(CollectionScanParams::FORWARD, obj));
+ }
+};
+
+//
+// Go backwards and match half the docs.
+//
+
+class QueryStageCollscanBasicBackwardWithMatch : public QueryStageCollectionScanBase {
+public:
+ void run() {
+ BSONObj obj = BSON("foo" << BSON("$lt" << 25));
+ ASSERT_EQUALS(25, countResults(CollectionScanParams::BACKWARD, obj));
+ }
+};
+
+//
+// Get objects in the order we inserted them.
+//
+
+class QueryStageCollscanObjectsInOrderForward : public QueryStageCollectionScanBase {
+public:
+ void run() {
+ AutoGetCollectionForRead ctx(&_txn, ns());
+
+ // Configure the scan.
+ CollectionScanParams params;
+ params.collection = ctx.getCollection();
+ params.direction = CollectionScanParams::FORWARD;
+ params.tailable = false;
+
+ // Make a scan and have the runner own it.
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* ps = new CollectionScan(&_txn, params, ws, NULL);
+
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(
+ &_txn, ws, ps, params.collection, PlanExecutor::YIELD_MANUAL, &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ int count = 0;
+ for (BSONObj obj; PlanExecutor::ADVANCED == exec->getNext(&obj, NULL);) {
+ // Make sure we get the objects in the order we want
+ ASSERT_EQUALS(count, obj["foo"].numberInt());
+ ++count;
+ }
- static int numObj() { return 50; }
-
- static const char* ns() { return "unittests.QueryStageCollectionScan"; }
-
- protected:
- OperationContextImpl _txn;
+ ASSERT_EQUALS(numObj(), count);
+ }
+};
- private:
- DBDirectClient _client;
- };
+//
+// Get objects in the reverse order we inserted them when we go backwards.
+//
+class QueryStageCollscanObjectsInOrderBackward : public QueryStageCollectionScanBase {
+public:
+ void run() {
+ AutoGetCollectionForRead ctx(&_txn, ns());
- //
- // Go forwards, get everything.
- //
- class QueryStageCollscanBasicForward : public QueryStageCollectionScanBase {
- public:
- void run() {
- ASSERT_EQUALS(numObj(), countResults(CollectionScanParams::FORWARD, BSONObj()));
- }
- };
+ CollectionScanParams params;
+ params.collection = ctx.getCollection();
+ params.direction = CollectionScanParams::BACKWARD;
+ params.tailable = false;
- //
- // Go backwards, get everything.
- //
-
- class QueryStageCollscanBasicBackward : public QueryStageCollectionScanBase {
- public:
- void run() {
- ASSERT_EQUALS(numObj(), countResults(CollectionScanParams::BACKWARD, BSONObj()));
- }
- };
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* ps = new CollectionScan(&_txn, params, ws, NULL);
- //
- // Go forwards and match half the docs.
- //
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(
+ &_txn, ws, ps, params.collection, PlanExecutor::YIELD_MANUAL, &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
- class QueryStageCollscanBasicForwardWithMatch : public QueryStageCollectionScanBase {
- public:
- void run() {
- BSONObj obj = BSON("foo" << BSON("$lt" << 25));
- ASSERT_EQUALS(25, countResults(CollectionScanParams::FORWARD, obj));
+ int count = 0;
+ for (BSONObj obj; PlanExecutor::ADVANCED == exec->getNext(&obj, NULL);) {
+ ++count;
+ ASSERT_EQUALS(numObj() - count, obj["foo"].numberInt());
}
- };
-
- //
- // Go backwards and match half the docs.
- //
- class QueryStageCollscanBasicBackwardWithMatch : public QueryStageCollectionScanBase {
- public:
- void run() {
- BSONObj obj = BSON("foo" << BSON("$lt" << 25));
- ASSERT_EQUALS(25, countResults(CollectionScanParams::BACKWARD, obj));
- }
- };
-
- //
- // Get objects in the order we inserted them.
- //
-
- class QueryStageCollscanObjectsInOrderForward : public QueryStageCollectionScanBase {
- public:
- void run() {
- AutoGetCollectionForRead ctx(&_txn, ns());
-
- // Configure the scan.
- CollectionScanParams params;
- params.collection = ctx.getCollection();
- params.direction = CollectionScanParams::FORWARD;
- params.tailable = false;
-
- // Make a scan and have the runner own it.
- WorkingSet* ws = new WorkingSet();
- PlanStage* ps = new CollectionScan(&_txn, params, ws, NULL);
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws, ps, params.collection,
- PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- int count = 0;
- for (BSONObj obj; PlanExecutor::ADVANCED == exec->getNext(&obj, NULL); ) {
- // Make sure we get the objects in the order we want
- ASSERT_EQUALS(count, obj["foo"].numberInt());
+ ASSERT_EQUALS(numObj(), count);
+ }
+};
+
+//
+// Scan through the first 10 objects, delete the one we're about to fetch, then expect to get
+// the "next" object we would have gotten after that.
+//
+
+class QueryStageCollscanInvalidateUpcomingObject : public QueryStageCollectionScanBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ Collection* coll = ctx.getCollection();
+
+ // Get the RecordIds that would be returned by an in-order scan.
+ vector<RecordId> locs;
+ getLocs(coll, CollectionScanParams::FORWARD, &locs);
+
+ // Configure the scan.
+ CollectionScanParams params;
+ params.collection = coll;
+ params.direction = CollectionScanParams::FORWARD;
+ params.tailable = false;
+
+ WorkingSet ws;
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+
+ int count = 0;
+ while (count < 10) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ member->obj.value()["foo"].numberInt());
++count;
}
-
- ASSERT_EQUALS(numObj(), count);
}
- };
-
- //
- // Get objects in the reverse order we inserted them when we go backwards.
- //
- class QueryStageCollscanObjectsInOrderBackward : public QueryStageCollectionScanBase {
- public:
- void run() {
- AutoGetCollectionForRead ctx(&_txn, ns());
-
- CollectionScanParams params;
- params.collection = ctx.getCollection();
- params.direction = CollectionScanParams::BACKWARD;
- params.tailable = false;
-
- WorkingSet* ws = new WorkingSet();
- PlanStage* ps = new CollectionScan(&_txn, params, ws, NULL);
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws, ps, params.collection,
- PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- int count = 0;
- for (BSONObj obj; PlanExecutor::ADVANCED == exec->getNext(&obj, NULL); ) {
+ // Remove locs[count].
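+        // invalidate() notifies the saved stage of the deletion, mirroring the real delete path.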
+ scan->saveState();
+ scan->invalidate(&_txn, locs[count], INVALIDATION_DELETION);
+ remove(coll->docFor(&_txn, locs[count]).value());
+ scan->restoreState(&_txn);
+
+ // Skip over locs[count].
+ ++count;
+
+ // Expect the rest.
+ while (!scan->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ member->obj.value()["foo"].numberInt());
++count;
- ASSERT_EQUALS(numObj() - count, obj["foo"].numberInt());
}
-
- ASSERT_EQUALS(numObj(), count);
}
- };
-
- //
- // Scan through half the objects, delete the one we're about to fetch, then expect to get the
- // "next" object we would have gotten after that.
- //
-
- class QueryStageCollscanInvalidateUpcomingObject : public QueryStageCollectionScanBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- Collection* coll = ctx.getCollection();
-
- // Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::FORWARD, &locs);
-
- // Configure the scan.
- CollectionScanParams params;
- params.collection = coll;
- params.direction = CollectionScanParams::FORWARD;
- params.tailable = false;
-
- WorkingSet ws;
- scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
-
- int count = 0;
- while (count < 10) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = scan->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
- member->obj.value()["foo"].numberInt());
- ++count;
- }
- }
-
- // Remove locs[count].
- scan->saveState();
- scan->invalidate(&_txn, locs[count], INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, locs[count]).value());
- scan->restoreState(&_txn);
- // Skip over locs[count].
- ++count;
-
- // Expect the rest.
- while (!scan->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = scan->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
- member->obj.value()["foo"].numberInt());
- ++count;
- }
+ ASSERT_EQUALS(numObj(), count);
+ }
+};
+
+//
+// Scan through the first 10 objects, delete the one we're about to fetch, then expect to get
+// the "next" object we would have gotten after that. But, do it in reverse!
+//
+
+class QueryStageCollscanInvalidateUpcomingObjectBackward : public QueryStageCollectionScanBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Collection* coll = ctx.getCollection();
+
+ // Get the RecordIds that would be returned by an in-order scan.
+ vector<RecordId> locs;
+ getLocs(coll, CollectionScanParams::BACKWARD, &locs);
+
+ // Configure the scan.
+ CollectionScanParams params;
+ params.collection = coll;
+ params.direction = CollectionScanParams::BACKWARD;
+ params.tailable = false;
+
+ WorkingSet ws;
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+
+ int count = 0;
+ while (count < 10) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ member->obj.value()["foo"].numberInt());
+ ++count;
}
-
- ASSERT_EQUALS(numObj(), count);
}
- };
-
- //
- // Scan through half the objects, delete the one we're about to fetch, then expect to get the
- // "next" object we would have gotten after that. But, do it in reverse!
- //
-
- class QueryStageCollscanInvalidateUpcomingObjectBackward : public QueryStageCollectionScanBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Collection* coll = ctx.getCollection();
-
- // Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::BACKWARD, &locs);
-
- // Configure the scan.
- CollectionScanParams params;
- params.collection = coll;
- params.direction = CollectionScanParams::BACKWARD;
- params.tailable = false;
-
- WorkingSet ws;
- scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
-
- int count = 0;
- while (count < 10) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = scan->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
- member->obj.value()["foo"].numberInt());
- ++count;
- }
- }
-
- // Remove locs[count].
- scan->saveState();
- scan->invalidate(&_txn, locs[count], INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, locs[count]).value());
- scan->restoreState(&_txn);
-
- // Skip over locs[count].
- ++count;
- // Expect the rest.
- while (!scan->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = scan->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
- member->obj.value()["foo"].numberInt());
- ++count;
- }
+ // Remove locs[count].
+ scan->saveState();
+ scan->invalidate(&_txn, locs[count], INVALIDATION_DELETION);
+ remove(coll->docFor(&_txn, locs[count]).value());
+ scan->restoreState(&_txn);
+
+ // Skip over locs[count].
+ ++count;
+
+ // Expect the rest.
+ while (!scan->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ ASSERT_EQUALS(coll->docFor(&_txn, locs[count]).value()["foo"].numberInt(),
+ member->obj.value()["foo"].numberInt());
+ ++count;
}
-
- ASSERT_EQUALS(numObj(), count);
}
- };
-
- class All : public Suite {
- public:
- All() : Suite( "QueryStageCollectionScan" ) {}
-
- void setupTests() {
- // Stage-specific tests below.
- add<QueryStageCollscanBasicForward>();
- add<QueryStageCollscanBasicBackward>();
- add<QueryStageCollscanBasicForwardWithMatch>();
- add<QueryStageCollscanBasicBackwardWithMatch>();
- add<QueryStageCollscanObjectsInOrderForward>();
- add<QueryStageCollscanObjectsInOrderBackward>();
- add<QueryStageCollscanInvalidateUpcomingObject>();
- add<QueryStageCollscanInvalidateUpcomingObjectBackward>();
- }
- };
-
- SuiteInstance<All> all;
+ ASSERT_EQUALS(numObj(), count);
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("QueryStageCollectionScan") {}
+
+ void setupTests() {
+ // Stage-specific tests below.
+ add<QueryStageCollscanBasicForward>();
+ add<QueryStageCollscanBasicBackward>();
+ add<QueryStageCollscanBasicForwardWithMatch>();
+ add<QueryStageCollscanBasicBackwardWithMatch>();
+ add<QueryStageCollscanObjectsInOrderForward>();
+ add<QueryStageCollscanObjectsInOrderBackward>();
+ add<QueryStageCollscanInvalidateUpcomingObject>();
+ add<QueryStageCollscanInvalidateUpcomingObjectBackward>();
+ }
+};
+
+SuiteInstance<All> all;
}
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index f100a8901f1..bdd79bcf6a6 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -42,316 +42,316 @@
namespace QueryStageCount {
- using boost::scoped_ptr;
- using std::auto_ptr;
- using std::vector;
-
- const int kDocuments = 100;
- const int kInterjections = kDocuments;
-
- class CountStageTest {
- public:
- CountStageTest()
- : _txn(),
- _scopedXact(&_txn, MODE_IX),
- _dbLock(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
- _ctx(&_txn, ns()),
- _coll(NULL) {
+using boost::scoped_ptr;
+using std::auto_ptr;
+using std::vector;
- }
-
- virtual ~CountStageTest() {}
-
- virtual void interject(CountStage&, int) {}
-
- virtual void setup() {
- WriteUnitOfWork wunit(&_txn);
-
- _ctx.db()->dropCollection(&_txn, ns());
- _coll = _ctx.db()->createCollection(&_txn, ns());
-
- _coll->getIndexCatalog()->createIndexOnEmptyCollection(
- &_txn,
- BSON(
- "key" << BSON("x" << 1) <<
- "name" << "x_1" <<
- "ns" << ns() <<
- "v" << 1
- )
- );
-
- for (int i=0; i<kDocuments; i++) {
- insert(BSON(GENOID << "x" << i));
- }
-
- wunit.commit();
- }
+const int kDocuments = 100;
+const int kInterjections = kDocuments;
- void getLocs() {
- _locs.clear();
- WorkingSet ws;
-
- CollectionScanParams params;
- params.collection = _coll;
- params.direction = CollectionScanParams::FORWARD;
- params.tailable = false;
-
- scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
- while (!scan->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = scan->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = ws.get(id);
- verify(member->hasLoc());
- _locs.push_back(member->loc);
- }
- }
- }
+class CountStageTest {
+public:
+ CountStageTest()
+ : _txn(),
+ _scopedXact(&_txn, MODE_IX),
+ _dbLock(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
+ _ctx(&_txn, ns()),
+ _coll(NULL) {}
- void insert(const BSONObj& doc) {
- WriteUnitOfWork wunit(&_txn);
- _coll->insertDocument(&_txn, doc, false);
- wunit.commit();
- }
-
- void remove(const RecordId& loc) {
- WriteUnitOfWork wunit(&_txn);
- _coll->deleteDocument(&_txn, loc, false, false, NULL);
- wunit.commit();
- }
+ virtual ~CountStageTest() {}
- void update(const RecordId& oldLoc, const BSONObj& newDoc) {
- WriteUnitOfWork wunit(&_txn);
- BSONObj oldDoc = _coll->getRecordStore()->dataFor( &_txn, oldLoc ).releaseToBson();
- _coll->updateDocument(&_txn, oldLoc,
- Snapshotted<BSONObj>(_txn.recoveryUnit()->getSnapshotId(), oldDoc),
- newDoc, false, true, NULL);
- wunit.commit();
- }
+ virtual void interject(CountStage&, int) {}
- // testcount is a wrapper around runCount that
- // - sets up a countStage
- // - runs it
- // - asserts count is not trivial
- // - asserts nCounted is equal to expected_n
- // - asserts nSkipped is correct
- void testCount(const CountRequest& request, int expected_n=kDocuments, bool indexed=false) {
- setup();
- getLocs();
-
- auto_ptr<WorkingSet> ws(new WorkingSet);
-
- StatusWithMatchExpression swme = MatchExpressionParser::parse(request.query);
- auto_ptr<MatchExpression> expression(swme.getValue());
-
- PlanStage* scan;
- if (indexed) {
- scan = createIndexScan(expression.get(), ws.get());
- } else {
- scan = createCollScan(expression.get(), ws.get());
- }
+ virtual void setup() {
+ WriteUnitOfWork wunit(&_txn);
- CountStage countStage(&_txn, _coll, request, ws.get(), scan);
+ _ctx.db()->dropCollection(&_txn, ns());
+ _coll = _ctx.db()->createCollection(&_txn, ns());
- const CountStats* stats = runCount(countStage);
+ _coll->getIndexCatalog()->createIndexOnEmptyCollection(&_txn,
+ BSON("key"
+ << BSON("x" << 1) << "name"
+ << "x_1"
+ << "ns" << ns() << "v" << 1));
- ASSERT_FALSE(stats->trivialCount);
- ASSERT_EQUALS(stats->nCounted, expected_n);
- ASSERT_EQUALS(stats->nSkipped, request.skip);
+ for (int i = 0; i < kDocuments; i++) {
+ insert(BSON(GENOID << "x" << i));
}
- // Performs a test using a count stage whereby each unit of work is interjected
- // in some way by the invocation of interject().
- const CountStats* runCount(CountStage& count_stage) {
- int interjection = 0;
- WorkingSetID wsid;
-
- while (!count_stage.isEOF()) {
- // do some work -- assumes that one work unit counts a single doc
- PlanStage::StageState state = count_stage.work(&wsid);
- ASSERT_NOT_EQUALS(state, PlanStage::FAILURE);
- ASSERT_NOT_EQUALS(state, PlanStage::DEAD);
-
- // prepare for yield
- count_stage.saveState();
-
- // interject in some way kInterjection times
- if (interjection < kInterjections) {
- interject(count_stage, interjection++);
- }
-
- // resume from yield
- count_stage.restoreState(&_txn);
+ wunit.commit();
+ }
+
+ void getLocs() {
+ _locs.clear();
+ WorkingSet ws;
+
+ CollectionScanParams params;
+ params.collection = _coll;
+ params.direction = CollectionScanParams::FORWARD;
+ params.tailable = false;
+
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ while (!scan->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ verify(member->hasLoc());
+ _locs.push_back(member->loc);
}
-
- return static_cast<const CountStats*>(count_stage.getSpecificStats());
- }
-
- IndexScan* createIndexScan(MatchExpression* expr, WorkingSet* ws) {
- IndexCatalog* catalog = _coll->getIndexCatalog();
- IndexDescriptor* descriptor = catalog->findIndexByKeyPattern(&_txn, BSON("x" << 1));
- invariant(descriptor);
-
- // We are not testing indexing here so use maximal bounds
- IndexScanParams params;
- params.descriptor = descriptor;
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 0);
- params.bounds.endKey = BSON("" << kDocuments+1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
-
- // This child stage gets owned and freed by its parent CountStage
- return new IndexScan(&_txn, params, ws, expr);
- }
-
- CollectionScan* createCollScan(MatchExpression* expr, WorkingSet* ws) {
- CollectionScanParams params;
- params.collection = _coll;
-
- // This child stage gets owned and freed by its parent CountStage
- return new CollectionScan(&_txn, params, ws, expr);
- }
-
- CountRequest createCountRequest(const BSONObj& filter, size_t skip=0, size_t limit=0) {
- CountRequest request;
- request.ns = ns();
- request.query = filter;
- request.limit = limit;
- request.skip = skip;
- request.explain = false;
- request.hint = BSONObj();
- return request;
- }
-
- static const char* ns() { return "unittest.QueryStageCount"; }
-
- protected:
- vector<RecordId> _locs;
- OperationContextImpl _txn;
- ScopedTransaction _scopedXact;
- Lock::DBLock _dbLock;
- Client::Context _ctx;
- Collection* _coll;
- };
-
- class QueryStageCountNoChangeDuringYield : public CountStageTest {
- public:
- void run() {
- BSONObj filter = BSON("x" << LT << kDocuments/2);
- CountRequest request = createCountRequest(filter);
- testCount(request, kDocuments/2);
- testCount(request, kDocuments/2, true);
- }
- };
-
- class QueryStageCountYieldWithSkip : public CountStageTest {
- public:
- void run() {
- CountRequest request = createCountRequest(BSON("x" << GTE << 0), 2);
- testCount(request, kDocuments-2);
- testCount(request, kDocuments-2, true);
}
- };
-
- class QueryStageCountYieldWithLimit : public CountStageTest {
- public:
- void run() {
- CountRequest request = createCountRequest(BSON("x" << GTE << 0), 0, 2);
- testCount(request, 2);
- testCount(request, 2, true);
+ }
+
+ void insert(const BSONObj& doc) {
+ WriteUnitOfWork wunit(&_txn);
+ _coll->insertDocument(&_txn, doc, false);
+ wunit.commit();
+ }
+
+ void remove(const RecordId& loc) {
+ WriteUnitOfWork wunit(&_txn);
+ _coll->deleteDocument(&_txn, loc, false, false, NULL);
+ wunit.commit();
+ }
+
+ void update(const RecordId& oldLoc, const BSONObj& newDoc) {
+ WriteUnitOfWork wunit(&_txn);
+ BSONObj oldDoc = _coll->getRecordStore()->dataFor(&_txn, oldLoc).releaseToBson();
+ _coll->updateDocument(&_txn,
+ oldLoc,
+ Snapshotted<BSONObj>(_txn.recoveryUnit()->getSnapshotId(), oldDoc),
+ newDoc,
+ false,
+ true,
+ NULL);
+ wunit.commit();
+ }
+
+    // testCount is a wrapper around runCount that
+ // - sets up a countStage
+ // - runs it
+ // - asserts count is not trivial
+ // - asserts nCounted is equal to expected_n
+ // - asserts nSkipped is correct
+ void testCount(const CountRequest& request, int expected_n = kDocuments, bool indexed = false) {
+ setup();
+ getLocs();
+
+ auto_ptr<WorkingSet> ws(new WorkingSet);
+
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(request.query);
+ auto_ptr<MatchExpression> expression(swme.getValue());
+
+ PlanStage* scan;
+ if (indexed) {
+ scan = createIndexScan(expression.get(), ws.get());
+ } else {
+ scan = createCollScan(expression.get(), ws.get());
}
- };
+ CountStage countStage(&_txn, _coll, request, ws.get(), scan);
- class QueryStageCountInsertDuringYield : public CountStageTest {
- public:
- void run() {
- CountRequest request = createCountRequest(BSON("x" << 1));
- testCount(request, kInterjections+1);
- testCount(request, kInterjections+1, true);
- }
+ const CountStats* stats = runCount(countStage);
- // This is called 100 times as we scan the collection
- void interject(CountStage&, int) {
- insert(BSON(GENOID << "x" << 1));
- }
- };
-
- class QueryStageCountDeleteDuringYield : public CountStageTest {
- public:
- void run() {
- // expected count would be 99 but we delete the second record
- // after doing the first unit of work
- CountRequest request = createCountRequest(BSON("x" << GTE << 1));
- testCount(request, kDocuments-2);
- testCount(request, kDocuments-2, true);
- }
+ ASSERT_FALSE(stats->trivialCount);
+ ASSERT_EQUALS(stats->nCounted, expected_n);
+ ASSERT_EQUALS(stats->nSkipped, request.skip);
+ }
- // At the point which this is called we are in between counting the first + second record
- void interject(CountStage& count_stage, int interjection) {
- if (interjection == 0) {
- // At this point, our first interjection, we've counted _locs[0]
- // and are about to count _locs[1]
- count_stage.invalidate(&_txn, _locs[interjection], INVALIDATION_DELETION);
- remove(_locs[interjection]);
+    // Runs the count stage to completion, calling interject() between units of work to
+    // simulate concurrent activity while the stage is saved (yielded).
+ const CountStats* runCount(CountStage& count_stage) {
+ int interjection = 0;
+ WorkingSetID wsid;
- count_stage.invalidate(&_txn, _locs[interjection+1], INVALIDATION_DELETION);
- remove(_locs[interjection+1]);
- }
- }
- };
-
- class QueryStageCountUpdateDuringYield : public CountStageTest {
- public:
- void run() {
- // expected count would be kDocuments-2 but we update the first and second records
- // after doing the first unit of work so they wind up getting counted later on
- CountRequest request = createCountRequest(BSON("x" << GTE << 2));
- testCount(request, kDocuments);
- testCount(request, kDocuments, true);
- }
+ while (!count_stage.isEOF()) {
+ // do some work -- assumes that one work unit counts a single doc
+ PlanStage::StageState state = count_stage.work(&wsid);
+ ASSERT_NOT_EQUALS(state, PlanStage::FAILURE);
+ ASSERT_NOT_EQUALS(state, PlanStage::DEAD);
- // At the point which this is called we are in between the first and second record
- void interject(CountStage& count_stage, int interjection) {
- if (interjection == 0) {
- count_stage.invalidate(&_txn, _locs[0], INVALIDATION_MUTATION);
- OID id1 = _coll->docFor(&_txn, _locs[0]).value().getField("_id").OID();
- update(_locs[0], BSON("_id" << id1 << "x" << 100));
+ // prepare for yield
+ count_stage.saveState();
- count_stage.invalidate(&_txn, _locs[1], INVALIDATION_MUTATION);
- OID id2 = _coll->docFor(&_txn, _locs[1]).value().getField("_id").OID();
- update(_locs[1], BSON("_id" << id2 << "x" << 100));
+            // interject in some way kInterjections times
+ if (interjection < kInterjections) {
+ interject(count_stage, interjection++);
}
- }
- };
- class QueryStageCountMultiKeyDuringYield : public CountStageTest {
- public:
- void run() {
- CountRequest request = createCountRequest(BSON("x" << 1));
- testCount(request, kDocuments+1, true); // only applies to indexed case
+ // resume from yield
+ count_stage.restoreState(&_txn);
}
- void interject(CountStage&, int) {
- // Should cause index to be converted to multikey
- insert(BSON(GENOID << "x" << BSON_ARRAY(1 << 2)));
+ return static_cast<const CountStats*>(count_stage.getSpecificStats());
+ }
+
+ IndexScan* createIndexScan(MatchExpression* expr, WorkingSet* ws) {
+ IndexCatalog* catalog = _coll->getIndexCatalog();
+ IndexDescriptor* descriptor = catalog->findIndexByKeyPattern(&_txn, BSON("x" << 1));
+ invariant(descriptor);
+
+ // We are not testing indexing here so use maximal bounds
+ IndexScanParams params;
+ params.descriptor = descriptor;
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 0);
+ params.bounds.endKey = BSON("" << kDocuments + 1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+
+ // This child stage gets owned and freed by its parent CountStage
+ return new IndexScan(&_txn, params, ws, expr);
+ }
+
+ CollectionScan* createCollScan(MatchExpression* expr, WorkingSet* ws) {
+ CollectionScanParams params;
+ params.collection = _coll;
+
+ // This child stage gets owned and freed by its parent CountStage
+ return new CollectionScan(&_txn, params, ws, expr);
+ }
+
+ CountRequest createCountRequest(const BSONObj& filter, size_t skip = 0, size_t limit = 0) {
+ CountRequest request;
+ request.ns = ns();
+ request.query = filter;
+ request.limit = limit;
+ request.skip = skip;
+ request.explain = false;
+ request.hint = BSONObj();
+ return request;
+ }
+
+ static const char* ns() {
+ return "unittest.QueryStageCount";
+ }
+
+protected:
+ vector<RecordId> _locs;
+ OperationContextImpl _txn;
+ ScopedTransaction _scopedXact;
+ Lock::DBLock _dbLock;
+ Client::Context _ctx;
+ Collection* _coll;
+};
+
+class QueryStageCountNoChangeDuringYield : public CountStageTest {
+public:
+ void run() {
+ BSONObj filter = BSON("x" << LT << kDocuments / 2);
+ CountRequest request = createCountRequest(filter);
+ testCount(request, kDocuments / 2);
+ testCount(request, kDocuments / 2, true);
+ }
+};
+
+class QueryStageCountYieldWithSkip : public CountStageTest {
+public:
+ void run() {
+ CountRequest request = createCountRequest(BSON("x" << GTE << 0), 2);
+ testCount(request, kDocuments - 2);
+ testCount(request, kDocuments - 2, true);
+ }
+};
+
+class QueryStageCountYieldWithLimit : public CountStageTest {
+public:
+ void run() {
+ CountRequest request = createCountRequest(BSON("x" << GTE << 0), 0, 2);
+ testCount(request, 2);
+ testCount(request, 2, true);
+ }
+};
+
+
+class QueryStageCountInsertDuringYield : public CountStageTest {
+public:
+ void run() {
+ CountRequest request = createCountRequest(BSON("x" << 1));
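+        // One pre-existing document has x == 1; each of the 100 interjections inserts another,
+        // and the scan is expected to count all of them.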
+ testCount(request, kInterjections + 1);
+ testCount(request, kInterjections + 1, true);
+ }
+
+ // This is called 100 times as we scan the collection
+ void interject(CountStage&, int) {
+ insert(BSON(GENOID << "x" << 1));
+ }
+};
+
+class QueryStageCountDeleteDuringYield : public CountStageTest {
+public:
+ void run() {
+        // Expected count would be 99 (docs with x >= 1), but interject() deletes the first two
+        // records after the first unit of work; the first never matched (x == 0), so the
+        // result is kDocuments - 2.
+ CountRequest request = createCountRequest(BSON("x" << GTE << 1));
+ testCount(request, kDocuments - 2);
+ testCount(request, kDocuments - 2, true);
+ }
+
+    // When this is called, we are between counting the first and second records.
+ void interject(CountStage& count_stage, int interjection) {
+ if (interjection == 0) {
+ // At this point, our first interjection, we've counted _locs[0]
+ // and are about to count _locs[1]
+ count_stage.invalidate(&_txn, _locs[interjection], INVALIDATION_DELETION);
+ remove(_locs[interjection]);
+
+ count_stage.invalidate(&_txn, _locs[interjection + 1], INVALIDATION_DELETION);
+ remove(_locs[interjection + 1]);
}
- };
-
- class All : public Suite {
- public:
- All() : Suite("query_stage_count") {}
-
- void setupTests() {
- add<QueryStageCountNoChangeDuringYield>();
- add<QueryStageCountYieldWithSkip>();
- add<QueryStageCountYieldWithLimit>();
- add<QueryStageCountInsertDuringYield>();
- add<QueryStageCountDeleteDuringYield>();
- add<QueryStageCountUpdateDuringYield>();
- add<QueryStageCountMultiKeyDuringYield>();
+ }
+};
+
+class QueryStageCountUpdateDuringYield : public CountStageTest {
+public:
+ void run() {
+        // Expected count would be kDocuments - 2, but we update the first and second records
+        // after doing the first unit of work, so they wind up getting counted later on.
+ CountRequest request = createCountRequest(BSON("x" << GTE << 2));
+ testCount(request, kDocuments);
+ testCount(request, kDocuments, true);
+ }
+
+    // When this is called, we are between the first and second records.
+ void interject(CountStage& count_stage, int interjection) {
+ if (interjection == 0) {
+ count_stage.invalidate(&_txn, _locs[0], INVALIDATION_MUTATION);
+ OID id1 = _coll->docFor(&_txn, _locs[0]).value().getField("_id").OID();
+ update(_locs[0], BSON("_id" << id1 << "x" << 100));
+
+ count_stage.invalidate(&_txn, _locs[1], INVALIDATION_MUTATION);
+ OID id2 = _coll->docFor(&_txn, _locs[1]).value().getField("_id").OID();
+ update(_locs[1], BSON("_id" << id2 << "x" << 100));
}
- } QueryStageCountAll;
-
-} // namespace QueryStageCount
+ }
+};
+
+class QueryStageCountMultiKeyDuringYield : public CountStageTest {
+public:
+ void run() {
+ CountRequest request = createCountRequest(BSON("x" << 1));
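+        // Each interjection inserts {x: [1, 2]}, making the index multikey; deduplication must
+        // still count each such document exactly once.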
+ testCount(request, kDocuments + 1, true); // only applies to indexed case
+ }
+
+ void interject(CountStage&, int) {
+ // Should cause index to be converted to multikey
+ insert(BSON(GENOID << "x" << BSON_ARRAY(1 << 2)));
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("query_stage_count") {}
+
+ void setupTests() {
+ add<QueryStageCountNoChangeDuringYield>();
+ add<QueryStageCountYieldWithSkip>();
+ add<QueryStageCountYieldWithLimit>();
+ add<QueryStageCountInsertDuringYield>();
+ add<QueryStageCountDeleteDuringYield>();
+ add<QueryStageCountUpdateDuringYield>();
+ add<QueryStageCountMultiKeyDuringYield>();
+ }
+} QueryStageCountAll;
+
+} // namespace QueryStageCount
diff --git a/src/mongo/dbtests/query_stage_count_scan.cpp b/src/mongo/dbtests/query_stage_count_scan.cpp
index 5e15fcaa3c8..a708c076fa6 100644
--- a/src/mongo/dbtests/query_stage_count_scan.cpp
+++ b/src/mongo/dbtests/query_stage_count_scan.cpp
@@ -46,605 +46,614 @@
namespace QueryStageCountScan {
- using boost::shared_ptr;
-
- class CountBase {
- public:
- CountBase() : _client(&_txn) {
-
+using boost::shared_ptr;
+
+class CountBase {
+public:
+ CountBase() : _client(&_txn) {}
+
+ virtual ~CountBase() {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
+ }
+
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
+
+ void insert(const BSONObj& obj) {
+ _client.insert(ns(), obj);
+ }
+
+ void remove(const BSONObj& obj) {
+ _client.remove(ns(), obj);
+ }
+
+ /*
+ * Given a CountScan PlanStage object count, call work() on
+ * count until we reach IS_EOF. Tally up how many objects
+ * we've counted and return the count.
+ */
+ int runCount(CountScan* count) {
+ int countWorks = 0;
+ WorkingSetID wsid;
+
+ PlanStage::StageState countState = count->work(&wsid);
+
+ while (PlanStage::IS_EOF != countState) {
+ if (PlanStage::ADVANCED == countState)
+ countWorks++;
+ countState = count->work(&wsid);
}
-
- virtual ~CountBase() {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
+ return countWorks;
+ }
+
+ IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
+ Collection* collection = db->getCollection(ns());
+ return collection->getIndexCatalog()->findIndexByKeyPattern(&_txn, obj);
+ }
+
+ static const char* ns() {
+ return "unittests.QueryStageCountScanScan";
+ }
+
+protected:
+ OperationContextImpl _txn;
+
+private:
+ DBDirectClient _client;
+};
+
+
+//
+// Check that dups are properly identified
+//
+class QueryStageCountScanDups : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert some docs
+ insert(BSON("a" << BSON_ARRAY(5 << 7)));
+ insert(BSON("a" << BSON_ARRAY(6 << 8)));
+
+ // Add an index on a:1
+ addIndex(BSON("a" << 1));
+
+ // Set up the count stage
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
+ verify(params.descriptor);
+ params.startKey = BSON("a" << 1);
+ params.startKeyInclusive = true;
+ params.endKey = BSON("a" << 10);
+ params.endKeyInclusive = true;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+
+ int numCounted = runCount(&count);
+ ASSERT_EQUALS(2, numCounted);
+ }
+};
+
+//
+// Check that expected results are returned with inclusive bounds
+//
+class QueryStageCountScanInclusiveBounds : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert some docs
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << i));
}
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ // Add an index
+ addIndex(BSON("a" << 1));
+
+ // Set up the count stage
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
+ params.startKey = BSON("" << 3);
+ params.startKeyInclusive = true;
+ params.endKey = BSON("" << 7);
+ params.endKeyInclusive = true;
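+        // Keys 3, 4, 5, 6 and 7 fall within the inclusive bounds [3, 7].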
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+
+ int numCounted = runCount(&count);
+ ASSERT_EQUALS(5, numCounted);
+ }
+};
+
+//
+// Check that expected results are returned with exclusive bounds
+//
+class QueryStageCountScanExclusiveBounds : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert some docs
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << i));
}
- void insert(const BSONObj& obj) {
- _client.insert(ns(), obj);
+ // Add an index
+ addIndex(BSON("a" << 1));
+
+ // Set up the count stage
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
+ params.startKey = BSON("" << 3);
+ params.startKeyInclusive = false;
+ params.endKey = BSON("" << 7);
+ params.endKeyInclusive = false;
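+        // Only keys 4, 5 and 6 fall within the exclusive bounds (3, 7).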
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+
+ int numCounted = runCount(&count);
+ ASSERT_EQUALS(3, numCounted);
+ }
+};
+
+//
+// Check that cursor returns no results if all docs are below lower bound
+//
+class QueryStageCountScanLowerBound : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert doc, add index
+ insert(BSON("a" << 2));
+ addIndex(BSON("a" << 1));
+
+ // Set up count, and run
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
+ params.startKey = BSON("" << 2);
+ params.startKeyInclusive = false;
+ params.endKey = BSON("" << 3);
+ params.endKeyInclusive = false;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+
+ int numCounted = runCount(&count);
+ ASSERT_EQUALS(0, numCounted);
+ }
+};
+
+//
+// Check that cursor returns no results if there are no docs within interval
+//
+class QueryStageCountScanNothingInInterval : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert documents, add index
+ insert(BSON("a" << 2));
+ insert(BSON("a" << 3));
+ addIndex(BSON("a" << 1));
+
+ // Set up count, and run
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
+ params.startKey = BSON("" << 2);
+ params.startKeyInclusive = false;
+ params.endKey = BSON("" << 3);
+ params.endKeyInclusive = false;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+
+ int numCounted = runCount(&count);
+ ASSERT_EQUALS(0, numCounted);
+ }
+};
+
+//
+// Check that cursor returns no results if there are no docs within interval
+// and the first key located during initialization is above upper bound
+//
+class QueryStageCountScanNothingInIntervalFirstMatchTooHigh : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert some documents, add index
+ insert(BSON("a" << 2));
+ insert(BSON("a" << 4));
+ addIndex(BSON("a" << 1));
+
+ // Set up count, and run
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
+ params.startKey = BSON("" << 2);
+ params.startKeyInclusive = false;
+ params.endKey = BSON("" << 3);
+ params.endKeyInclusive = true;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+
+ int numCounted = runCount(&count);
+ ASSERT_EQUALS(0, numCounted);
+ }
+};
+
+//
+// Check that cursor recovers its position properly if there is no change
+// during a yield
+//
+class QueryStageCountScanNoChangeDuringYield : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert documents, add index
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << i));
}
-
- void remove(const BSONObj& obj) {
- _client.remove(ns(), obj);
+ addIndex(BSON("a" << 1));
+
+ // Set up count stage
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.startKey = BSON("" << 2);
+ params.startKeyInclusive = false;
+ params.endKey = BSON("" << 6);
+ params.endKeyInclusive = true;
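+        // The bounds (2, 6] cover keys 3, 4, 5 and 6, so we expect 4 in total.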
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+ WorkingSetID wsid;
+
+ int numCounted = 0;
+ PlanStage::StageState countState;
+
+ // Begin running the count
+ while (numCounted < 2) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
}
- /*
- * Given a CountScan PlanStage object count, call work() on
- * count until we reach IS_EOF. Tally up how many objects
- * we've counted and return the count.
- */
- int runCount(CountScan* count) {
- int countWorks = 0;
- WorkingSetID wsid;
-
- PlanStage::StageState countState = count->work(&wsid);
-
- while (PlanStage::IS_EOF != countState) {
- if (PlanStage::ADVANCED == countState) countWorks++;
- countState = count->work(&wsid);
- }
- return countWorks;
- }
+ // Prepare the cursor to yield
+ count.saveState();
- IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
- Collection* collection = db->getCollection(ns());
- return collection->getIndexCatalog()->findIndexByKeyPattern(&_txn, obj);
- }
+ // Recover from yield
+ count.restoreState(&_txn);
- static const char* ns() { return "unittests.QueryStageCountScanScan"; }
-
- protected:
- OperationContextImpl _txn;
-
- private:
- DBDirectClient _client;
- };
-
-
- //
- // Check that dups are properly identified
- //
- class QueryStageCountScanDups : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert some docs
- insert(BSON("a" << BSON_ARRAY(5 << 7)));
- insert(BSON("a" << BSON_ARRAY(6 << 8)));
-
- // Add an index on a:1
- addIndex(BSON("a" << 1));
-
- // Set up the count stage
- CountScanParams params;
- params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
- verify(params.descriptor);
- params.startKey = BSON("a" << 1);
- params.startKeyInclusive = true;
- params.endKey = BSON("a" << 10);
- params.endKeyInclusive = true;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
-
- int numCounted = runCount(&count);
- ASSERT_EQUALS(2, numCounted);
+        // Finish counting
+ while (PlanStage::IS_EOF != countState) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
}
- };
-
- //
- // Check that expected results are returned with inclusive bounds
- //
- class QueryStageCountScanInclusiveBounds : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert some docs
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << i));
- }
-
- // Add an index
- addIndex(BSON("a" << 1));
-
- // Set up the count stage
- CountScanParams params;
- params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
- params.startKey = BSON("" << 3);
- params.startKeyInclusive = true;
- params.endKey = BSON("" << 7);
- params.endKeyInclusive = true;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
-
- int numCounted = runCount(&count);
- ASSERT_EQUALS(5, numCounted);
+ ASSERT_EQUALS(4, numCounted);
+ }
+};
+
+//
+// Check that cursor recovers its position properly if its current location
+// is deleted during a yield
+//
+class QueryStageCountScanDeleteDuringYield : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert documents, add index
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << i));
}
- };
-
- //
- // Check that expected results are returned with exclusive bounds
- //
- class QueryStageCountScanExclusiveBounds : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert some docs
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << i));
- }
-
- // Add an index
- addIndex(BSON("a" << 1));
-
- // Set up the count stage
- CountScanParams params;
- params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
- params.startKey = BSON("" << 3);
- params.startKeyInclusive = false;
- params.endKey = BSON("" << 7);
- params.endKeyInclusive = false;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
-
- int numCounted = runCount(&count);
- ASSERT_EQUALS(3, numCounted);
+ addIndex(BSON("a" << 1));
+
+ // Set up count stage
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.startKey = BSON("" << 2);
+ params.startKeyInclusive = false;
+ params.endKey = BSON("" << 6);
+ params.endKeyInclusive = true;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+ WorkingSetID wsid;
+
+ int numCounted = 0;
+ PlanStage::StageState countState;
+
+ // Begin running the count
+ while (numCounted < 2) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
}
- };
-
- //
- // Check that cursor returns no results if all docs are below lower bound
- //
- class QueryStageCountScanLowerBound : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert doc, add index
- insert(BSON("a" << 2));
- addIndex(BSON("a" << 1));
-
- // Set up count, and run
- CountScanParams params;
- params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
- params.startKey = BSON("" << 2);
- params.startKeyInclusive = false;
- params.endKey = BSON("" << 3);
- params.endKeyInclusive = false;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
-
- int numCounted = runCount(&count);
- ASSERT_EQUALS(0, numCounted);
+
+ // Prepare the cursor to yield
+ count.saveState();
+
+ // Remove remaining objects
+ remove(BSON("a" << GTE << 5));
+
+ // Recover from yield
+ count.restoreState(&_txn);
+
+        // Finish counting
+ while (PlanStage::IS_EOF != countState) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
}
- };
-
- //
- // Check that cursor returns no results if there are no docs within interval
- //
- class QueryStageCountScanNothingInInterval : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert documents, add index
- insert(BSON("a" << 2));
- insert(BSON("a" << 3));
- addIndex(BSON("a" << 1));
-
- // Set up count, and run
- CountScanParams params;
- params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
- params.startKey = BSON("" << 2);
- params.startKeyInclusive = false;
- params.endKey = BSON("" << 3);
- params.endKeyInclusive = false;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
-
- int numCounted = runCount(&count);
- ASSERT_EQUALS(0, numCounted);
+ ASSERT_EQUALS(2, numCounted);
+ }
+};
+
+//
+// Check that cursor relocates its end location properly if end location
+// changes during a yield
+//
+class QueryStageCountScanInsertNewDocsDuringYield : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert documents, add index
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << i));
}
- };
-
- //
- // Check that cursor returns no results if there are no docs within interval
- // and the first key located during initialization is above upper bound
- //
- class QueryStageCountScanNothingInIntervalFirstMatchTooHigh : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert some documents, add index
- insert(BSON("a" << 2));
- insert(BSON("a" << 4));
- addIndex(BSON("a" << 1));
-
- // Set up count, and run
- CountScanParams params;
- params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
- params.startKey = BSON("" << 2);
- params.startKeyInclusive = false;
- params.endKey = BSON("" << 3);
- params.endKeyInclusive = true;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
-
- int numCounted = runCount(&count);
- ASSERT_EQUALS(0, numCounted);
+ addIndex(BSON("a" << 1));
+
+ // Set up count stage
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.startKey = BSON("" << 2);
+ params.startKeyInclusive = false;
+ params.endKey = BSON("" << 6);
+ params.endKeyInclusive = true;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+ WorkingSetID wsid;
+
+ int numCounted = 0;
+ PlanStage::StageState countState;
+
+ // Begin running the count
+ while (numCounted < 2) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
}
- };
-
- //
- // Check that cursor recovers its position properly if there is no change
- // during a yield
- //
- class QueryStageCountScanNoChangeDuringYield : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert documents, add index
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << i));
- }
- addIndex(BSON("a" << 1));
-
- // Set up count stage
- CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
- params.startKey = BSON("" << 2);
- params.startKeyInclusive = false;
- params.endKey = BSON("" << 6);
- params.endKeyInclusive = true;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
- WorkingSetID wsid;
-
- int numCounted = 0;
- PlanStage::StageState countState;
-
- // Begin running the count
- while (numCounted < 2) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
-
- // Prepare the cursor to yield
- count.saveState();
-
- // Recover from yield
- count.restoreState(&_txn);
-
- // finish counting
- while (PlanStage::IS_EOF != countState) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
- ASSERT_EQUALS(4, numCounted);
+
+ // Prepare the cursor to yield
+ count.saveState();
+
+ // Insert one document before the end
+ insert(BSON("a" << 5.5));
+
+ // Insert one document after the end
+ insert(BSON("a" << 6.5));
+
+ // Recover from yield
+ count.restoreState(&_txn);
+
+ // finish counting
+ while (PlanStage::IS_EOF != countState) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
}
- };
-
- //
- // Check that cursor recovers its position properly if its current location
- // is deleted during a yield
- //
- class QueryStageCountScanDeleteDuringYield : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert documents, add index
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << i));
- }
- addIndex(BSON("a" << 1));
-
- // Set up count stage
- CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
- params.startKey = BSON("" << 2);
- params.startKeyInclusive = false;
- params.endKey = BSON("" << 6);
- params.endKeyInclusive = true;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
- WorkingSetID wsid;
-
- int numCounted = 0;
- PlanStage::StageState countState;
-
- // Begin running the count
- while (numCounted < 2) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
-
- // Prepare the cursor to yield
- count.saveState();
-
- // Remove remaining objects
- remove(BSON("a" << GTE << 5));
-
- // Recover from yield
- count.restoreState(&_txn);
-
- // finish counting
- while (PlanStage::IS_EOF != countState) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
- ASSERT_EQUALS(2, numCounted);
+ ASSERT_EQUALS(5, numCounted);
+ }
+};
+
+//
+// Check that count performs correctly if an index becomes multikey
+// during a yield
+//
+class QueryStageCountScanBecomesMultiKeyDuringYield : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert documents, add index
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << i));
}
- };
-
- //
- // Check that cursor relocates its end location properly if end location
- // changes during a yield
- //
- class QueryStageCountScanInsertNewDocsDuringYield : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert documents, add index
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << i));
- }
- addIndex(BSON("a" << 1));
-
- // Set up count stage
- CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
- params.startKey = BSON("" << 2);
- params.startKeyInclusive = false;
- params.endKey = BSON("" << 6);
- params.endKeyInclusive = true;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
- WorkingSetID wsid;
-
- int numCounted = 0;
- PlanStage::StageState countState;
-
- // Begin running the count
- while (numCounted < 2) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
-
- // Prepare the cursor to yield
- count.saveState();
-
- // Insert one document before the end
- insert(BSON("a" << 5.5));
-
- // Insert one document after the end
- insert(BSON("a" << 6.5));
-
- // Recover from yield
- count.restoreState(&_txn);
-
- // finish counting
- while (PlanStage::IS_EOF != countState) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
- ASSERT_EQUALS(5, numCounted);
+ addIndex(BSON("a" << 1));
+
+ // Set up count stage
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.startKey = BSON("" << 2);
+ params.startKeyInclusive = false;
+ params.endKey = BSON("" << 50);
+ params.endKeyInclusive = true;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+ WorkingSetID wsid;
+
+ int numCounted = 0;
+ PlanStage::StageState countState;
+
+ // Begin running the count
+ while (numCounted < 2) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
}
- };
-
- //
- // Check that count performs correctly if an index becomes multikey
- // during a yield
- //
- class QueryStageCountScanBecomesMultiKeyDuringYield : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert documents, add index
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << i));
- }
- addIndex(BSON("a" << 1));
-
- // Set up count stage
- CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
- params.startKey = BSON("" << 2);
- params.startKeyInclusive = false;
- params.endKey = BSON("" << 50);
- params.endKeyInclusive = true;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
- WorkingSetID wsid;
-
- int numCounted = 0;
- PlanStage::StageState countState;
-
- // Begin running the count
- while (numCounted < 2) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
-
- // Prepare the cursor to yield
- count.saveState();
-
- // Insert a document with two values for 'a'
- insert(BSON("a" << BSON_ARRAY(10 << 11)));
-
- // Recover from yield
- count.restoreState(&_txn);
-
- // finish counting
- while (PlanStage::IS_EOF != countState) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
- ASSERT_EQUALS(8, numCounted);
+
+ // Prepare the cursor to yield
+ count.saveState();
+
+ // Insert a document with two values for 'a'
+ insert(BSON("a" << BSON_ARRAY(10 << 11)));
+
+ // Recover from yield
+ count.restoreState(&_txn);
+
+ // finish counting
+ while (PlanStage::IS_EOF != countState) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
}
- };
-
- //
- // Unused keys are not returned during iteration
- //
- class QueryStageCountScanUnusedKeys : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert docs, add index
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << 1 << "b" << i));
- }
- addIndex(BSON("a" << 1));
-
- // Mark several keys as 'unused'
- remove(BSON("a" << 1 << "b" << 0));
- remove(BSON("a" << 1 << "b" << 3));
- remove(BSON("a" << 1 << "b" << 4));
-
- // Ensure that count does not include unused keys
- CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
- params.startKey = BSON("" << 1);
- params.startKeyInclusive = true;
- params.endKey = BSON("" << 1);
- params.endKeyInclusive = true;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
-
- int numCounted = runCount(&count);
- ASSERT_EQUALS(7, numCounted);
+ ASSERT_EQUALS(8, numCounted);
+ }
+};
+
+//
+// Unused keys are not returned during iteration
+//
+class QueryStageCountScanUnusedKeys : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert docs, add index
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << 1 << "b" << i));
}
- };
-
- //
- // Iteration is properly terminated when the end location is an unused key
- //
- class QueryStageCountScanUnusedEndKey : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert docs, add index
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << 1 << "b" << i ));
- }
- addIndex(BSON("a" << 1));
-
- // Mark key at end position as 'unused' by deleting
- remove(BSON("a" << 1 << "b" << 9));
-
- // Run count and check
- CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
- params.startKey = BSON("" << 0);
- params.startKeyInclusive = true;
- params.endKey = BSON("" << 2);
- params.endKeyInclusive = true; // yes?
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
-
- int numCounted = runCount(&count);
- ASSERT_EQUALS(9, numCounted);
+ addIndex(BSON("a" << 1));
+
+ // Mark several keys as 'unused'
+ remove(BSON("a" << 1 << "b" << 0));
+ remove(BSON("a" << 1 << "b" << 3));
+ remove(BSON("a" << 1 << "b" << 4));
+
+ // Ensure that count does not include unused keys
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.startKey = BSON("" << 1);
+ params.startKeyInclusive = true;
+ params.endKey = BSON("" << 1);
+ params.endKeyInclusive = true;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+
+ int numCounted = runCount(&count);
+ ASSERT_EQUALS(7, numCounted);
+ }
+};
+
+//
+// Iteration is properly terminated when the end location is an unused key
+//
+class QueryStageCountScanUnusedEndKey : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert docs, add index
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << 1 << "b" << i));
}
- };
-
- //
- // Advances past a key that becomes unused during a yield
- //
- class QueryStageCountScanKeyBecomesUnusedDuringYield : public CountBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- // Insert documents, add index
- for (int i = 0; i < 10; ++i) {
- insert(BSON("a" << 1 << "b" << i));
- }
- addIndex(BSON("a" << 1));
-
- // Set up count stage
- CountScanParams params;
- params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
- params.startKey = BSON("" << 1);
- params.startKeyInclusive = true;
- params.endKey = BSON("" << 1);
- params.endKeyInclusive = true;
-
- WorkingSet ws;
- CountScan count(&_txn, params, &ws);
- WorkingSetID wsid;
-
- int numCounted = 0;
- PlanStage::StageState countState;
-
- // Begin running the count
- while (numCounted < 2) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
-
- // Prepare the cursor to yield
- count.saveState();
-
- // Mark the key at position 5 as 'unused'
- remove(BSON("a" << 1 << "b" << 5));
-
- // Recover from yield
- count.restoreState(&_txn);
-
- // finish counting
- while (PlanStage::IS_EOF != countState) {
- countState = count.work(&wsid);
- if (PlanStage::ADVANCED == countState) numCounted++;
- }
- ASSERT_EQUALS(8, numCounted);
+ addIndex(BSON("a" << 1));
+
+ // Mark key at end position as 'unused' by deleting
+ remove(BSON("a" << 1 << "b" << 9));
+
+ // Run count and check
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.startKey = BSON("" << 0);
+ params.startKeyInclusive = true;
+ params.endKey = BSON("" << 2);
+ params.endKeyInclusive = true;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+
+ int numCounted = runCount(&count);
+ ASSERT_EQUALS(9, numCounted);
+ }
+};
+
+//
+// Advances past a key that becomes unused during a yield
+//
+class QueryStageCountScanKeyBecomesUnusedDuringYield : public CountBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Insert documents, add index
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("a" << 1 << "b" << i));
}
- };
-
- class All : public Suite {
- public:
- All() : Suite("query_stage_count_scan") { }
-
- void setupTests() {
- add<QueryStageCountScanDups>();
- add<QueryStageCountScanInclusiveBounds>();
- add<QueryStageCountScanExclusiveBounds>();
- add<QueryStageCountScanLowerBound>();
- add<QueryStageCountScanNothingInInterval>();
- add<QueryStageCountScanNothingInIntervalFirstMatchTooHigh>();
- add<QueryStageCountScanNoChangeDuringYield>();
- add<QueryStageCountScanDeleteDuringYield>();
- add<QueryStageCountScanInsertNewDocsDuringYield>();
- add<QueryStageCountScanBecomesMultiKeyDuringYield>();
- add<QueryStageCountScanUnusedKeys>();
+ addIndex(BSON("a" << 1));
+
+ // Set up count stage
+ CountScanParams params;
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
+ params.startKey = BSON("" << 1);
+ params.startKeyInclusive = true;
+ params.endKey = BSON("" << 1);
+ params.endKeyInclusive = true;
+
+ WorkingSet ws;
+ CountScan count(&_txn, params, &ws);
+ WorkingSetID wsid;
+
+ int numCounted = 0;
+ PlanStage::StageState countState;
+
+ // Begin running the count
+ while (numCounted < 2) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
}
- };
- SuiteInstance<All> queryStageCountScanAll;
-} // namespace QueryStageCountScan
+
+ // Prepare the cursor to yield
+ count.saveState();
+
+ // Mark the key at position 5 as 'unused'
+ remove(BSON("a" << 1 << "b" << 5));
+
+ // Recover from yield
+ count.restoreState(&_txn);
+
+ // finish counting
+ while (PlanStage::IS_EOF != countState) {
+ countState = count.work(&wsid);
+ if (PlanStage::ADVANCED == countState)
+ numCounted++;
+ }
+ ASSERT_EQUALS(8, numCounted);
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("query_stage_count_scan") {}
+
+ void setupTests() {
+ add<QueryStageCountScanDups>();
+ add<QueryStageCountScanInclusiveBounds>();
+ add<QueryStageCountScanExclusiveBounds>();
+ add<QueryStageCountScanLowerBound>();
+ add<QueryStageCountScanNothingInInterval>();
+ add<QueryStageCountScanNothingInIntervalFirstMatchTooHigh>();
+ add<QueryStageCountScanNoChangeDuringYield>();
+ add<QueryStageCountScanDeleteDuringYield>();
+ add<QueryStageCountScanInsertNewDocsDuringYield>();
+ add<QueryStageCountScanBecomesMultiKeyDuringYield>();
+ add<QueryStageCountScanUnusedKeys>();
+ add<QueryStageCountScanUnusedEndKey>();
+ add<QueryStageCountScanKeyBecomesUnusedDuringYield>();
+ }
+};
+
+SuiteInstance<All> queryStageCountScanAll;
+
+} // namespace QueryStageCountScan
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index 5a2385887ce..3161d08a36c 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -42,138 +42,143 @@
namespace QueryStageDelete {
- using boost::scoped_ptr;
- using std::vector;
-
- //
- // Stage-specific tests.
- //
-
- class QueryStageDeleteBase {
- public:
- QueryStageDeleteBase() : _client(&_txn) {
- Client::WriteContext ctx(&_txn, ns());
-
- for (size_t i = 0; i < numObj(); ++i) {
- BSONObjBuilder bob;
- bob.append("foo", static_cast<long long int>(i));
- _client.insert(ns(), bob.obj());
- }
- }
-
- virtual ~QueryStageDeleteBase() {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
+using boost::scoped_ptr;
+using std::vector;
+
+//
+// Stage-specific tests.
+//
+
+class QueryStageDeleteBase {
+public:
+ QueryStageDeleteBase() : _client(&_txn) {
+ Client::WriteContext ctx(&_txn, ns());
+
+ for (size_t i = 0; i < numObj(); ++i) {
+ BSONObjBuilder bob;
+ bob.append("foo", static_cast<long long int>(i));
+ _client.insert(ns(), bob.obj());
}
-
- void remove(const BSONObj& obj) {
- _client.remove(ns(), obj);
- }
-
- void getLocs(Collection* collection,
- CollectionScanParams::Direction direction,
- vector<RecordId>* out) {
- WorkingSet ws;
-
- CollectionScanParams params;
- params.collection = collection;
- params.direction = direction;
- params.tailable = false;
-
- scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
- while (!scan->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = scan->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = ws.get(id);
- verify(member->hasLoc());
- out->push_back(member->loc);
- }
+ }
+
+ virtual ~QueryStageDeleteBase() {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
+ }
+
+ void remove(const BSONObj& obj) {
+ _client.remove(ns(), obj);
+ }
+
+ void getLocs(Collection* collection,
+ CollectionScanParams::Direction direction,
+ vector<RecordId>* out) {
+ WorkingSet ws;
+
+ CollectionScanParams params;
+ params.collection = collection;
+ params.direction = direction;
+ params.tailable = false;
+
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ while (!scan->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ verify(member->hasLoc());
+ out->push_back(member->loc);
}
}
+ }
+
+ static size_t numObj() {
+ return 50;
+ }
+
+ static const char* ns() {
+ return "unittests.QueryStageDelete";
+ }
+
+protected:
+ OperationContextImpl _txn;
+
+private:
+ DBDirectClient _client;
+};
+
+//
+// Test invalidation for the delete stage. Use the delete stage to delete some objects
+// retrieved by a collscan, then invalidate the upcoming object, then expect the delete stage to
+// skip over it and successfully delete the rest.
+//
+class QueryStageDeleteInvalidateUpcomingObject : public QueryStageDeleteBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ Collection* coll = ctx.getCollection();
+
+ // Get the RecordIds that would be returned by an in-order scan.
+ vector<RecordId> locs;
+ getLocs(coll, CollectionScanParams::FORWARD, &locs);
+
+ // Configure the scan.
+ CollectionScanParams collScanParams;
+ collScanParams.collection = coll;
+ collScanParams.direction = CollectionScanParams::FORWARD;
+ collScanParams.tailable = false;
+
+ // Configure the delete stage.
+ DeleteStageParams deleteStageParams;
+ deleteStageParams.isMulti = true;
+ deleteStageParams.shouldCallLogOp = false;
+
+ WorkingSet ws;
+ DeleteStage deleteStage(&_txn,
+ deleteStageParams,
+ &ws,
+ coll,
+ new CollectionScan(&_txn, collScanParams, &ws, NULL));
+
+ const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage.getSpecificStats());
+
+ const size_t targetDocIndex = 10;
+
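+ // Delete the first ten docs. Each work() reports NEED_TIME, since the
+ // delete stage consumes documents without producing any output.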
+ while (stats->docsDeleted < targetDocIndex) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = deleteStage.work(&id);
+ ASSERT_EQUALS(PlanStage::NEED_TIME, state);
+ }
- static size_t numObj() { return 50; }
-
- static const char* ns() { return "unittests.QueryStageDelete"; }
-
- protected:
- OperationContextImpl _txn;
-
- private:
- DBDirectClient _client;
- };
-
- //
- // Test invalidation for the delete stage. Use the delete stage to delete some objects
- // retrieved by a collscan, then invalidate the upcoming object, then expect the delete stage to
- // skip over it and successfully delete the rest.
- //
- class QueryStageDeleteInvalidateUpcomingObject : public QueryStageDeleteBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- Collection* coll = ctx.getCollection();
-
- // Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::FORWARD, &locs);
-
- // Configure the scan.
- CollectionScanParams collScanParams;
- collScanParams.collection = coll;
- collScanParams.direction = CollectionScanParams::FORWARD;
- collScanParams.tailable = false;
-
- // Configure the delete stage.
- DeleteStageParams deleteStageParams;
- deleteStageParams.isMulti = true;
- deleteStageParams.shouldCallLogOp = false;
-
- WorkingSet ws;
- DeleteStage deleteStage(&_txn, deleteStageParams, &ws, coll,
- new CollectionScan(&_txn, collScanParams, &ws, NULL));
-
- const DeleteStats* stats =
- static_cast<const DeleteStats*>(deleteStage.getSpecificStats());
-
- const size_t targetDocIndex = 10;
-
- while (stats->docsDeleted < targetDocIndex) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = deleteStage.work(&id);
- ASSERT_EQUALS(PlanStage::NEED_TIME, state);
- }
-
- // Remove locs[targetDocIndex];
- deleteStage.saveState();
- deleteStage.invalidate(&_txn, locs[targetDocIndex], INVALIDATION_DELETION);
- BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]).value();
- ASSERT(!targetDoc.isEmpty());
- remove(targetDoc);
- deleteStage.restoreState(&_txn);
-
- // Remove the rest.
- while (!deleteStage.isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = deleteStage.work(&id);
- invariant(PlanStage::NEED_TIME == state || PlanStage::IS_EOF == state);
- }
-
- ASSERT_EQUALS(numObj() - 1, stats->docsDeleted);
+ // Remove locs[targetDocIndex].
+ deleteStage.saveState();
+ deleteStage.invalidate(&_txn, locs[targetDocIndex], INVALIDATION_DELETION);
+ BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]).value();
+ ASSERT(!targetDoc.isEmpty());
+ remove(targetDoc);
+ deleteStage.restoreState(&_txn);
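+ // The stage should now skip the already-deleted doc rather than attempt
+ // to delete it a second time.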
+
+ // Remove the rest.
+ while (!deleteStage.isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = deleteStage.work(&id);
+ invariant(PlanStage::NEED_TIME == state || PlanStage::IS_EOF == state);
}
- };
- class All : public Suite {
- public:
- All() : Suite("query_stage_delete") {}
+ ASSERT_EQUALS(numObj() - 1, stats->docsDeleted);
+ }
+};
- void setupTests() {
- // Stage-specific tests below.
- add<QueryStageDeleteInvalidateUpcomingObject>();
- }
- };
+class All : public Suite {
+public:
+ All() : Suite("query_stage_delete") {}
- SuiteInstance<All> all;
+
+ void setupTests() {
+ // Stage-specific tests below.
+ add<QueryStageDeleteInvalidateUpcomingObject>();
+ }
+};
+
+SuiteInstance<All> all;
}
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index 3029464cfd1..157b35d2379 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -44,199 +44,200 @@
namespace QueryStageDistinct {
- class DistinctBase {
- public:
- DistinctBase() : _client(&_txn) {
-
+class DistinctBase {
+public:
+ DistinctBase() : _client(&_txn) {}
+
+ virtual ~DistinctBase() {
+ _client.dropCollection(ns());
+ }
+
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
+
+ void insert(const BSONObj& obj) {
+ _client.insert(ns(), obj);
+ }
+
+ /**
+ * Returns the projected value from the working set that would
+ * be returned in the 'values' field of the distinct command result.
+ * Limited to NumberInt BSON types because this is the only
+ * BSON type used in this suite of tests.
+ */
+ static int getIntFieldDotted(const WorkingSet& ws,
+ WorkingSetID wsid,
+ const std::string& field) {
+ // For some reason (at least under OS X clang), we cannot refer to INVALID_ID
+ // inside the test assertion macro.
+ WorkingSetID invalid = WorkingSet::INVALID_ID;
+ ASSERT_NOT_EQUALS(invalid, wsid);
+
+ WorkingSetMember* member = ws.get(wsid);
+
+ // Distinct hack execution is always covered.
+ // Key value is retrieved from working set key data
+ // instead of RecordId.
+ ASSERT_FALSE(member->hasObj());
+ BSONElement keyElt;
+ ASSERT_TRUE(member->getFieldDotted(field, &keyElt));
+ ASSERT_TRUE(keyElt.isNumber());
+
+ return keyElt.numberInt();
+ }
+
+ static const char* ns() {
+ return "unittests.QueryStageDistinct";
+ }
+
+protected:
+ OperationContextImpl _txn;
+
+private:
+ DBDirectClient _client;
+};
+
+
+// Tests distinct with single key indices.
+class QueryStageDistinctBasic : public DistinctBase {
+public:
+ virtual ~QueryStageDistinctBasic() {}
+
+ void run() {
+ // Insert a ton of documents with a: 1
+ for (size_t i = 0; i < 1000; ++i) {
+ insert(BSON("a" << 1));
}
- virtual ~DistinctBase() {
- _client.dropCollection(ns());
+ // Insert a ton of other documents with a: 2
+ for (size_t i = 0; i < 1000; ++i) {
+ insert(BSON("a" << 2));
}
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ // Make an index on a:1
+ addIndex(BSON("a" << 1));
+
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* coll = ctx.getCollection();
+
+ // Set up the distinct stage.
+ DistinctParams params;
+ params.descriptor = coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, BSON("a" << 1));
+ verify(params.descriptor);
+ params.direction = 1;
+ // Distinct-ing over the 0-th field of the keypattern.
+ params.fieldNo = 0;
+ // We'll look at all values in the bounds.
+ params.bounds.isSimpleRange = false;
+ OrderedIntervalList oil("a");
+ oil.intervals.push_back(IndexBoundsBuilder::allValues());
+ params.bounds.fields.push_back(oil);
+
+ WorkingSet ws;
+ DistinctScan distinct(&_txn, params, &ws);
+
+ WorkingSetID wsid;
+ // Get our first result.
+ int firstResultWorks = 0;
+ while (PlanStage::ADVANCED != distinct.work(&wsid)) {
+ ++firstResultWorks;
}
-
- void insert(const BSONObj& obj) {
- _client.insert(ns(), obj);
+ // 5 is a bogus number. There's some amount of setup done by the first few calls but
+ // we should return the first result relatively promptly.
+ ASSERT_LESS_THAN(firstResultWorks, 5);
+ ASSERT_EQUALS(1, getIntFieldDotted(ws, wsid, "a"));
+
+ // Getting our second result should be very quick as we just skip
+ // over the first result.
+ int secondResultWorks = 0;
+ while (PlanStage::ADVANCED != distinct.work(&wsid)) {
+ ++secondResultWorks;
}
-
- /**
- * Returns the projected value from the working set that would
- * be returned in the 'values' field of the distinct command result.
- * Limited to NumberInt BSON types because this is the only
- * BSON type used in this suite of tests.
- */
- static int getIntFieldDotted(const WorkingSet& ws, WorkingSetID wsid,
- const std::string& field) {
- // For some reason (at least under OS X clang), we cannot refer to INVALID_ID
- // inside the test assertion macro.
- WorkingSetID invalid = WorkingSet::INVALID_ID;
- ASSERT_NOT_EQUALS(invalid, wsid);
-
- WorkingSetMember* member = ws.get(wsid);
-
- // Distinct hack execution is always covered.
- // Key value is retrieved from working set key data
- // instead of RecordId.
- ASSERT_FALSE(member->hasObj());
- BSONElement keyElt;
- ASSERT_TRUE(member->getFieldDotted(field, &keyElt));
- ASSERT_TRUE(keyElt.isNumber());
-
- return keyElt.numberInt();
+ ASSERT_EQUALS(2, getIntFieldDotted(ws, wsid, "a"));
+ // This is 0 because we don't have to loop for several values; we just skip over
+ // all the 'a' values.
+ ASSERT_EQUALS(0, secondResultWorks);
+
+ ASSERT_EQUALS(PlanStage::IS_EOF, distinct.work(&wsid));
+ }
+};
+
+// Tests distinct with multikey indices.
+class QueryStageDistinctMultiKey : public DistinctBase {
+public:
+ virtual ~QueryStageDistinctMultiKey() {}
+
+ void run() {
+ // Insert a ton of documents with a: [1, 2, 3]
+ for (size_t i = 0; i < 1000; ++i) {
+ insert(BSON("a" << BSON_ARRAY(1 << 2 << 3)));
}
- static const char* ns() { return "unittests.QueryStageDistinct"; }
-
- protected:
- OperationContextImpl _txn;
-
- private:
- DBDirectClient _client;
- };
-
-
- // Tests distinct with single key indices.
- class QueryStageDistinctBasic : public DistinctBase {
- public:
- virtual ~QueryStageDistinctBasic() { }
-
- void run() {
- // Insert a ton of documents with a: 1
- for (size_t i = 0; i < 1000; ++i) {
- insert(BSON("a" << 1));
- }
-
- // Insert a ton of other documents with a: 2
- for (size_t i = 0; i < 1000; ++i) {
- insert(BSON("a" << 2));
- }
-
- // Make an index on a:1
- addIndex(BSON("a" << 1));
-
- AutoGetCollectionForRead ctx(&_txn, ns());
- Collection* coll = ctx.getCollection();
-
- // Set up the distinct stage.
- DistinctParams params;
- params.descriptor = coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, BSON("a" << 1));
- verify(params.descriptor);
- params.direction = 1;
- // Distinct-ing over the 0-th field of the keypattern.
- params.fieldNo = 0;
- // We'll look at all values in the bounds.
- params.bounds.isSimpleRange = false;
- OrderedIntervalList oil("a");
- oil.intervals.push_back(IndexBoundsBuilder::allValues());
- params.bounds.fields.push_back(oil);
-
- WorkingSet ws;
- DistinctScan distinct(&_txn, params, &ws);
-
- WorkingSetID wsid;
- // Get our first result.
- int firstResultWorks = 0;
- while (PlanStage::ADVANCED != distinct.work(&wsid)) {
- ++firstResultWorks;
- }
- // 5 is a bogus number. There's some amount of setup done by the first few calls but
- // we should return the first result relatively promptly.
- ASSERT_LESS_THAN(firstResultWorks, 5);
- ASSERT_EQUALS(1, getIntFieldDotted(ws, wsid, "a"));
-
- // Getting our second result should be very quick as we just skip
- // over the first result.
- int secondResultWorks = 0;
- while (PlanStage::ADVANCED != distinct.work(&wsid)) {
- ++secondResultWorks;
- }
- ASSERT_EQUALS(2, getIntFieldDotted(ws, wsid, "a"));
- // This is 0 because we don't have to loop for several values; we just skip over
- // all the 'a' values.
- ASSERT_EQUALS(0, secondResultWorks);
-
- ASSERT_EQUALS(PlanStage::IS_EOF, distinct.work(&wsid));
+ // Insert a ton of other documents with a: [4, 5, 6]
+ for (size_t i = 0; i < 1000; ++i) {
+ insert(BSON("a" << BSON_ARRAY(4 << 5 << 6)));
}
- };
-
- // Tests distinct with multikey indices.
- class QueryStageDistinctMultiKey : public DistinctBase {
- public:
- virtual ~QueryStageDistinctMultiKey() { }
-
- void run() {
- // Insert a ton of documents with a: [1, 2, 3]
- for (size_t i = 0; i < 1000; ++i) {
- insert(BSON("a" << BSON_ARRAY(1 << 2 << 3)));
- }
- // Insert a ton of other documents with a: [4, 5, 6]
- for (size_t i = 0; i < 1000; ++i) {
- insert(BSON("a" << BSON_ARRAY(4 << 5 << 6)));
+ // Make an index on a:1
+ addIndex(BSON("a" << 1));
+
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* coll = ctx.getCollection();
+
+ // Set up the distinct stage.
+ DistinctParams params;
+ params.descriptor = coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, BSON("a" << 1));
+ verify(params.descriptor);
+ ASSERT_TRUE(params.descriptor->isMultikey(&_txn));
+
+ params.direction = 1;
+ // Distinct-ing over the 0-th field of the keypattern.
+ params.fieldNo = 0;
+ // We'll look at all values in the bounds.
+ params.bounds.isSimpleRange = false;
+ OrderedIntervalList oil("a");
+ oil.intervals.push_back(IndexBoundsBuilder::allValues());
+ params.bounds.fields.push_back(oil);
+
+ WorkingSet ws;
+ DistinctScan distinct(&_txn, params, &ws);
+
+ // We should see each number in the range [1, 6] exactly once.
+ std::set<int> seen;
+
+ WorkingSetID wsid;
+ PlanStage::StageState state;
+ while (PlanStage::IS_EOF != (state = distinct.work(&wsid))) {
+ if (PlanStage::ADVANCED == state) {
+ // Check int value.
+ int currentNumber = getIntFieldDotted(ws, wsid, "a");
+ ASSERT_GREATER_THAN_OR_EQUALS(currentNumber, 1);
+ ASSERT_LESS_THAN_OR_EQUALS(currentNumber, 6);
+
+ // Should see this number only once.
+ ASSERT_TRUE(seen.find(currentNumber) == seen.end());
+ seen.insert(currentNumber);
}
-
- // Make an index on a:1
- addIndex(BSON("a" << 1));
-
- AutoGetCollectionForRead ctx(&_txn, ns());
- Collection* coll = ctx.getCollection();
-
- // Set up the distinct stage.
- DistinctParams params;
- params.descriptor = coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, BSON("a" << 1));
- ASSERT_TRUE(params.descriptor->isMultikey(&_txn));
-
- verify(params.descriptor);
- params.direction = 1;
- // Distinct-ing over the 0-th field of the keypattern.
- params.fieldNo = 0;
- // We'll look at all values in the bounds.
- params.bounds.isSimpleRange = false;
- OrderedIntervalList oil("a");
- oil.intervals.push_back(IndexBoundsBuilder::allValues());
- params.bounds.fields.push_back(oil);
-
- WorkingSet ws;
- DistinctScan distinct(&_txn, params, &ws);
-
- // We should see each number in the range [1, 6] exactly once.
- std::set<int> seen;
-
- WorkingSetID wsid;
- PlanStage::StageState state;
- while (PlanStage::IS_EOF != (state = distinct.work(&wsid))) {
- if (PlanStage::ADVANCED == state) {
- // Check int value.
- int currentNumber = getIntFieldDotted(ws, wsid, "a");
- ASSERT_GREATER_THAN_OR_EQUALS(currentNumber, 1);
- ASSERT_LESS_THAN_OR_EQUALS(currentNumber, 6);
-
- // Should see this number only once.
- ASSERT_TRUE(seen.find(currentNumber) == seen.end());
- seen.insert(currentNumber);
- }
- }
-
- ASSERT_EQUALS(6U, seen.size());
}
- };
- // XXX: add a test case with bounds where skipping to the next key gets us a result that's not
- // valid w.r.t. our query.
+ ASSERT_EQUALS(6U, seen.size());
+ }
+};
- class All : public Suite {
- public:
- All() : Suite( "query_stage_distinct" ) { }
+// XXX: add a test case with bounds where skipping to the next key gets us a result that's not
+// valid w.r.t. our query.
- void setupTests() {
- add<QueryStageDistinctBasic>();
- add<QueryStageDistinctMultiKey>();
- }
- };
+class All : public Suite {
+public:
+ All() : Suite("query_stage_distinct") {}
+
+ void setupTests() {
+ add<QueryStageDistinctBasic>();
+ add<QueryStageDistinctMultiKey>();
+ }
+};
- SuiteInstance<All> queryStageDistinctAll;
+
+SuiteInstance<All> queryStageDistinctAll;
} // namespace QueryStageDistinct
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 248fbf14511..18fa85af5b6 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -46,179 +46,179 @@
namespace QueryStageFetch {
- using boost::shared_ptr;
- using std::auto_ptr;
- using std::set;
-
- class QueryStageFetchBase {
- public:
- QueryStageFetchBase() : _client(&_txn) {
-
+using boost::shared_ptr;
+using std::auto_ptr;
+using std::set;
+
+class QueryStageFetchBase {
+public:
+ QueryStageFetchBase() : _client(&_txn) {}
+
+ virtual ~QueryStageFetchBase() {
+ _client.dropCollection(ns());
+ }
+
+ void getLocs(set<RecordId>* out, Collection* coll) {
+ RecordIterator* it = coll->getIterator(&_txn);
+ while (!it->isEOF()) {
+ RecordId nextLoc = it->getNext();
+ out->insert(nextLoc);
}
-
- virtual ~QueryStageFetchBase() {
- _client.dropCollection(ns());
+ delete it;
+ }
+
+ void insert(const BSONObj& obj) {
+ _client.insert(ns(), obj);
+ }
+
+ void remove(const BSONObj& obj) {
+ _client.remove(ns(), obj);
+ }
+
+ static const char* ns() {
+ return "unittests.QueryStageFetch";
+ }
+
+protected:
+ OperationContextImpl _txn;
+ DBDirectClient _client;
+};
+
+
+//
+// Test that a WSM with an obj is passed through verbatim.
+//
+class FetchStageAlreadyFetched : public QueryStageFetchBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- void getLocs(set<RecordId>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(&_txn);
- while (!it->isEOF()) {
- RecordId nextLoc = it->getNext();
- out->insert(nextLoc);
- }
- delete it;
+ WorkingSet ws;
+
+ // Add an object to the DB.
+ insert(BSON("foo" << 5));
+ set<RecordId> locs;
+ getLocs(&locs, coll);
+ ASSERT_EQUALS(size_t(1), locs.size());
+
+ // Create a mock stage that returns the WSM.
+ auto_ptr<QueuedDataStage> mockStage(new QueuedDataStage(&ws));
+
+ // Mock data.
+ {
+ WorkingSetMember mockMember;
+ mockMember.state = WorkingSetMember::LOC_AND_OBJ;
+ mockMember.loc = *locs.begin();
+ mockMember.obj = coll->docFor(&_txn, mockMember.loc);
+ // Points into our DB.
+ mockStage->pushBack(mockMember);
+
+ mockMember.state = WorkingSetMember::OWNED_OBJ;
+ mockMember.loc = RecordId();
+ mockMember.obj = Snapshotted<BSONObj>(SnapshotId(), BSON("foo" << 6));
+ ASSERT_TRUE(mockMember.obj.value().isOwned());
+ mockStage->pushBack(mockMember);
}
- void insert(const BSONObj& obj) {
- _client.insert(ns(), obj);
+ auto_ptr<FetchStage> fetchStage(
+ new FetchStage(&_txn, &ws, mockStage.release(), NULL, coll));
+
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state;
+
+ // Don't bother doing any fetching if an obj exists already.
+ state = fetchStage->work(&id);
+ ASSERT_EQUALS(PlanStage::ADVANCED, state);
+ state = fetchStage->work(&id);
+ ASSERT_EQUALS(PlanStage::ADVANCED, state);
+
+ // No more data to fetch, so, EOF.
+ state = fetchStage->work(&id);
+ ASSERT_EQUALS(PlanStage::IS_EOF, state);
+ }
+};
+
+//
+// Test matching with fetch.
+//
+class FetchStageFilter : public QueryStageFetchBase {
+public:
+ void run() {
+ ScopedTransaction transaction(&_txn, MODE_IX);
+ Lock::DBLock lk(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X);
+ Client::Context ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- void remove(const BSONObj& obj) {
- _client.remove(ns(), obj);
- }
+ WorkingSet ws;
- static const char* ns() { return "unittests.QueryStageFetch"; }
-
- protected:
- OperationContextImpl _txn;
- DBDirectClient _client;
- };
-
-
- //
- // Test that a WSM with an obj is passed through verbatim.
- //
- class FetchStageAlreadyFetched : public QueryStageFetchBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
-
- WorkingSet ws;
-
- // Add an object to the DB.
- insert(BSON("foo" << 5));
- set<RecordId> locs;
- getLocs(&locs, coll);
- ASSERT_EQUALS(size_t(1), locs.size());
-
- // Create a mock stage that returns the WSM.
- auto_ptr<QueuedDataStage> mockStage(new QueuedDataStage(&ws));
-
- // Mock data.
- {
- WorkingSetMember mockMember;
- mockMember.state = WorkingSetMember::LOC_AND_OBJ;
- mockMember.loc = *locs.begin();
- mockMember.obj = coll->docFor(&_txn, mockMember.loc);
- // Points into our DB.
- mockStage->pushBack(mockMember);
-
- mockMember.state = WorkingSetMember::OWNED_OBJ;
- mockMember.loc = RecordId();
- mockMember.obj = Snapshotted<BSONObj>(SnapshotId(), BSON("foo" << 6));
- ASSERT_TRUE(mockMember.obj.value().isOwned());
- mockStage->pushBack(mockMember);
- }
-
- auto_ptr<FetchStage> fetchStage(new FetchStage(&_txn, &ws, mockStage.release(),
- NULL, coll));
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state;
-
- // Don't bother doing any fetching if an obj exists already.
- state = fetchStage->work(&id);
- ASSERT_EQUALS(PlanStage::ADVANCED, state);
- state = fetchStage->work(&id);
- ASSERT_EQUALS(PlanStage::ADVANCED, state);
-
- // No more data to fetch, so, EOF.
- state = fetchStage->work(&id);
- ASSERT_EQUALS(PlanStage::IS_EOF, state);
- }
- };
-
- //
- // Test matching with fetch.
- //
- class FetchStageFilter : public QueryStageFetchBase {
- public:
- void run() {
- ScopedTransaction transaction(&_txn, MODE_IX);
- Lock::DBLock lk(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X);
- Client::Context ctx(&_txn, ns());
- Database* db = ctx.db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
-
- WorkingSet ws;
-
- // Add an object to the DB.
- insert(BSON("foo" << 5));
- set<RecordId> locs;
- getLocs(&locs, coll);
- ASSERT_EQUALS(size_t(1), locs.size());
-
- // Create a mock stage that returns the WSM.
- auto_ptr<QueuedDataStage> mockStage(new QueuedDataStage(&ws));
-
- // Mock data.
- {
- WorkingSetMember mockMember;
- mockMember.state = WorkingSetMember::LOC_AND_IDX;
- mockMember.loc = *locs.begin();
-
- // State is loc and index, shouldn't be able to get the foo data inside.
- BSONElement elt;
- ASSERT_FALSE(mockMember.getFieldDotted("foo", &elt));
- mockStage->pushBack(mockMember);
- }
-
- // Make the filter.
- BSONObj filterObj = BSON("foo" << 6);
- StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
- verify(swme.isOK());
- auto_ptr<MatchExpression> filterExpr(swme.getValue());
-
- // Matcher requires that foo==6 but we only have data with foo==5.
- auto_ptr<FetchStage> fetchStage(
- new FetchStage(&_txn, &ws, mockStage.release(), filterExpr.get(), coll));
-
- // First call should return a fetch request as it's not in memory.
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state;
-
- // Normally we'd return the object but we have a filter that prevents it.
- state = fetchStage->work(&id);
- ASSERT_EQUALS(PlanStage::NEED_TIME, state);
-
- // No more data to fetch, so, EOF.
- state = fetchStage->work(&id);
- ASSERT_EQUALS(PlanStage::IS_EOF, state);
- }
- };
+ // Add an object to the DB.
+ insert(BSON("foo" << 5));
+ set<RecordId> locs;
+ getLocs(&locs, coll);
+ ASSERT_EQUALS(size_t(1), locs.size());
+
+ // Create a mock stage that returns the WSM.
+ auto_ptr<QueuedDataStage> mockStage(new QueuedDataStage(&ws));
- class All : public Suite {
- public:
- All() : Suite( "query_stage_fetch" ) { }
+ // Mock data.
+ {
+ WorkingSetMember mockMember;
+ mockMember.state = WorkingSetMember::LOC_AND_IDX;
+ mockMember.loc = *locs.begin();
- void setupTests() {
- add<FetchStageAlreadyFetched>();
- add<FetchStageFilter>();
+ // State is loc and index, shouldn't be able to get the foo data inside.
+ BSONElement elt;
+ ASSERT_FALSE(mockMember.getFieldDotted("foo", &elt));
+ mockStage->pushBack(mockMember);
}
- };
- SuiteInstance<All> queryStageFetchAll;
+ // Make the filter.
+ BSONObj filterObj = BSON("foo" << 6);
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
+ verify(swme.isOK());
+ auto_ptr<MatchExpression> filterExpr(swme.getValue());
+
+ // Matcher requires that foo==6 but we only have data with foo==5.
+ auto_ptr<FetchStage> fetchStage(
+ new FetchStage(&_txn, &ws, mockStage.release(), filterExpr.get(), coll));
+
+ // First call should return a fetch request as it's not in memory.
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state;
+
+ // Normally we'd return the object but we have a filter that prevents it.
+ state = fetchStage->work(&id);
+ ASSERT_EQUALS(PlanStage::NEED_TIME, state);
+
+ // No more data to fetch, so, EOF.
+ state = fetchStage->work(&id);
+ ASSERT_EQUALS(PlanStage::IS_EOF, state);
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("query_stage_fetch") {}
+
+ void setupTests() {
+ add<FetchStageAlreadyFetched>();
+ add<FetchStageFilter>();
+ }
+};
+
+SuiteInstance<All> queryStageFetchAll;
} // namespace QueryStageFetch
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index fd16177ce51..90fb9b2ef9f 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -36,282 +36,272 @@
namespace QueryStageIxscan {
- class IndexScanTest {
- public:
- IndexScanTest()
- : _txn(),
- _scopedXact(&_txn, MODE_IX),
- _dbLock(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
- _ctx(&_txn, ns()),
- _coll(NULL) {
-
- }
-
- virtual ~IndexScanTest() { }
-
- virtual void setup() {
- WriteUnitOfWork wunit(&_txn);
-
- _ctx.db()->dropCollection(&_txn, ns());
- _coll = _ctx.db()->createCollection(&_txn, ns());
-
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("x" << 1)));
-
- wunit.commit();
- }
-
- void insert(const BSONObj& doc) {
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(_coll->insertDocument(&_txn, doc, false).getStatus());
- wunit.commit();
- }
-
- /**
- * Works 'ixscan' until it advances. Returns the index key via a pointer to the
- * WorkingSetMember containing the key.
- */
- WorkingSetMember* getNext(IndexScan* ixscan) {
- WorkingSetID out;
-
- PlanStage::StageState state = PlanStage::NEED_TIME;
- while (PlanStage::ADVANCED != state) {
- state = ixscan->work(&out);
-
- // There are certain states we shouldn't get.
- ASSERT_NE(PlanStage::IS_EOF, state);
- ASSERT_NE(PlanStage::DEAD, state);
- ASSERT_NE(PlanStage::FAILURE, state);
- }
-
- return _ws.get(out);
- }
-
-
- IndexScan* createIndexScanSimpleRange(BSONObj startKey, BSONObj endKey) {
- IndexCatalog* catalog = _coll->getIndexCatalog();
- IndexDescriptor* descriptor = catalog->findIndexByKeyPattern(&_txn, BSON("x" << 1));
- invariant(descriptor);
-
- // We are not testing indexing here so use maximal bounds
- IndexScanParams params;
- params.descriptor = descriptor;
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = startKey;
- params.bounds.endKey = endKey;
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
-
- // This child stage gets owned and freed by the caller.
- MatchExpression* filter = NULL;
- return new IndexScan(&_txn, params, &_ws, filter);
- }
-
- IndexScan* createIndexScan(BSONObj startKey,
- BSONObj endKey,
- bool startInclusive,
- bool endInclusive,
- int direction = 1) {
- IndexCatalog* catalog = _coll->getIndexCatalog();
- IndexDescriptor* descriptor = catalog->findIndexByKeyPattern(&_txn, BSON("x" << 1));
- invariant(descriptor);
-
- IndexScanParams params;
- params.descriptor = descriptor;
- params.direction = direction;
-
- OrderedIntervalList oil("x");
- BSONObjBuilder bob;
- bob.appendAs(startKey.firstElement(), "");
- bob.appendAs(endKey.firstElement(), "");
- oil.intervals.push_back(Interval(bob.obj(), startInclusive, endInclusive));
- params.bounds.fields.push_back(oil);
-
- MatchExpression* filter = NULL;
- return new IndexScan(&_txn, params, &_ws, filter);
- }
-
- static const char* ns() { return "unittest.QueryStageIxscan"; }
-
- protected:
- OperationContextImpl _txn;
-
- ScopedTransaction _scopedXact;
- Lock::DBLock _dbLock;
- Client::Context _ctx;
- Collection* _coll;
-
- WorkingSet _ws;
- };
-
- // SERVER-15958: Some IndexScanStats info must be initialized on construction of an IndexScan.
- class QueryStageIxscanInitializeStats : public IndexScanTest {
- public:
- void run() {
- setup();
-
- // Make the {x: 1} index multikey by inserting a doc where 'x' is an array.
- insert(fromjson("{_id: 1, x: [1, 2, 3]}"));
-
- std::auto_ptr<IndexScan> ixscan(
- createIndexScanSimpleRange(BSON("x" << 1), BSON("x" << 3)));
-
- // Verify that SpecificStats of 'ixscan' have been properly initialized.
- const IndexScanStats* stats =
- static_cast<const IndexScanStats*>(ixscan->getSpecificStats());
- ASSERT(stats);
- ASSERT_TRUE(stats->isMultiKey);
- ASSERT_EQUALS(stats->keyPattern, BSON("x" << 1));
- }
- };
-
- // SERVER-16437
- class QueryStageIxscanInsertDuringSave : public IndexScanTest {
- public:
- void run() {
- setup();
-
- insert(fromjson("{_id: 1, x: 5}"));
- insert(fromjson("{_id: 2, x: 6}"));
- insert(fromjson("{_id: 3, x: 12}"));
-
- boost::scoped_ptr<IndexScan> ixscan(createIndexScan(BSON("x" << 5),
- BSON("x" << 10),
- true,
- true));
-
- // Expect to get key {'': 5} and then key {'': 6}.
- WorkingSetMember* member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 5));
- member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
-
- // Save state and insert a few indexed docs.
- ixscan->saveState();
- insert(fromjson("{_id: 4, x: 10}"));
- insert(fromjson("{_id: 5, x: 11}"));
- ixscan->restoreState(&_txn);
-
- // Expect EOF: we miss {'': 10} because it is inserted behind the cursor.
- ASSERT(ixscan->isEOF());
- WorkingSetID id;
- ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
- }
- };
-
- // SERVER-16437
- class QueryStageIxscanInsertDuringSaveExclusive : public IndexScanTest {
- public:
- void run() {
- setup();
-
- insert(fromjson("{_id: 1, x: 5}"));
- insert(fromjson("{_id: 2, x: 6}"));
- insert(fromjson("{_id: 3, x: 10}"));
-
- boost::scoped_ptr<IndexScan> ixscan(createIndexScan(BSON("x" << 5),
- BSON("x" << 10),
- false,
- false));
-
- // Expect to get key {'': 6}.
- WorkingSetMember* member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
-
- // Save state and insert an indexed doc.
- ixscan->saveState();
- insert(fromjson("{_id: 4, x: 7}"));
- ixscan->restoreState(&_txn);
-
- // Expect EOF: we miss {'': 7} because it is inserted behind the cursor, and
- // {'': 10} is not in the range (5, 10)
- ASSERT(ixscan->isEOF());
- WorkingSetID id;
- ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
- }
- };
-
- // SERVER-16437
- class QueryStageIxscanInsertDuringSaveExclusive2 : public IndexScanTest {
- public:
- void run() {
- setup();
-
- insert(fromjson("{_id: 1, x: 5}"));
- insert(fromjson("{_id: 2, x: 6}"));
- insert(fromjson("{_id: 3, x: 12}"));
-
- boost::scoped_ptr<IndexScan> ixscan(createIndexScan(BSON("x" << 5),
- BSON("x" << 10),
- false,
- false));
-
- // Expect to get key {'': 6}.
- WorkingSetMember* member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
-
- // Save state and insert an indexed doc.
- ixscan->saveState();
- insert(fromjson("{_id: 4, x: 10}"));
- ixscan->restoreState(&_txn);
-
- // Ensure that we're EOF and we don't erroneously return {'': 12}.
- ASSERT(ixscan->isEOF());
- WorkingSetID id;
- ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
- }
- };
-
- // SERVER-16437
- class QueryStageIxscanInsertDuringSaveReverse : public IndexScanTest {
- public:
- void run() {
- setup();
-
- insert(fromjson("{_id: 1, x: 10}"));
- insert(fromjson("{_id: 2, x: 8}"));
- insert(fromjson("{_id: 3, x: 3}"));
-
- boost::scoped_ptr<IndexScan> ixscan(createIndexScan(BSON("x" << 10),
- BSON("x" << 5),
- true,
- true,
- -1 /* reverse scan */));
-
- // Expect to get key {'': 10} and then {'': 8}.
- WorkingSetMember* member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 10));
- member = getNext(ixscan.get());
- ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 8));
-
- // Save state and insert an indexed doc.
- ixscan->saveState();
- insert(fromjson("{_id: 4, x: 6}"));
- ixscan->restoreState(&_txn);
-
- // Ensure that we're EOF and we don't erroneously return {'': 6}.
- ASSERT(ixscan->isEOF());
- WorkingSetID id;
- ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite("query_stage_ixscan") {}
-
- void setupTests() {
- add<QueryStageIxscanInitializeStats>();
- add<QueryStageIxscanInsertDuringSave>();
- add<QueryStageIxscanInsertDuringSaveExclusive>();
- add<QueryStageIxscanInsertDuringSaveExclusive2>();
- add<QueryStageIxscanInsertDuringSaveReverse>();
+class IndexScanTest {
+public:
+ IndexScanTest()
+ : _txn(),
+ _scopedXact(&_txn, MODE_IX),
+ _dbLock(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
+ _ctx(&_txn, ns()),
+ _coll(NULL) {}
+
+ virtual ~IndexScanTest() {}
+
+ virtual void setup() {
+ WriteUnitOfWork wunit(&_txn);
+
+ _ctx.db()->dropCollection(&_txn, ns());
+ _coll = _ctx.db()->createCollection(&_txn, ns());
+
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("x" << 1)));
+
+ wunit.commit();
+ }
+
+ void insert(const BSONObj& doc) {
+ WriteUnitOfWork wunit(&_txn);
+ ASSERT_OK(_coll->insertDocument(&_txn, doc, false).getStatus());
+ wunit.commit();
+ }
+
+ /**
+ * Works 'ixscan' until it advances. Returns the index key via a pointer to the
+ * WorkingSetMember containing the key.
+ */
+ WorkingSetMember* getNext(IndexScan* ixscan) {
+ WorkingSetID out;
+
+ PlanStage::StageState state = PlanStage::NEED_TIME;
+ while (PlanStage::ADVANCED != state) {
+ state = ixscan->work(&out);
+
+ // There are certain states we shouldn't get.
+ ASSERT_NE(PlanStage::IS_EOF, state);
+ ASSERT_NE(PlanStage::DEAD, state);
+ ASSERT_NE(PlanStage::FAILURE, state);
}
- } QueryStageIxscanAll;
-} // namespace QueryStageIxscan
+ return _ws.get(out);
+ }
+
+
+ IndexScan* createIndexScanSimpleRange(BSONObj startKey, BSONObj endKey) {
+ IndexCatalog* catalog = _coll->getIndexCatalog();
+ IndexDescriptor* descriptor = catalog->findIndexByKeyPattern(&_txn, BSON("x" << 1));
+ invariant(descriptor);
+
+ // We are not testing indexing here so use maximal bounds
+ IndexScanParams params;
+ params.descriptor = descriptor;
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = startKey;
+ params.bounds.endKey = endKey;
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+
+ // This child stage gets owned and freed by the caller.
+ MatchExpression* filter = NULL;
+ return new IndexScan(&_txn, params, &_ws, filter);
+ }
+
+ IndexScan* createIndexScan(BSONObj startKey,
+ BSONObj endKey,
+ bool startInclusive,
+ bool endInclusive,
+ int direction = 1) {
+ IndexCatalog* catalog = _coll->getIndexCatalog();
+ IndexDescriptor* descriptor = catalog->findIndexByKeyPattern(&_txn, BSON("x" << 1));
+ invariant(descriptor);
+
+ IndexScanParams params;
+ params.descriptor = descriptor;
+ params.direction = direction;
+
+ OrderedIntervalList oil("x");
+ BSONObjBuilder bob;
+ bob.appendAs(startKey.firstElement(), "");
+ bob.appendAs(endKey.firstElement(), "");
+ oil.intervals.push_back(Interval(bob.obj(), startInclusive, endInclusive));
+ params.bounds.fields.push_back(oil);
+
+ MatchExpression* filter = NULL;
+ return new IndexScan(&_txn, params, &_ws, filter);
+ }
+
+ static const char* ns() {
+ return "unittest.QueryStageIxscan";
+ }
+
+protected:
+ OperationContextImpl _txn;
+
+ ScopedTransaction _scopedXact;
+ Lock::DBLock _dbLock;
+ Client::Context _ctx;
+ Collection* _coll;
+
+ WorkingSet _ws;
+};
+
+// SERVER-15958: Some IndexScanStats info must be initialized on construction of an IndexScan.
+class QueryStageIxscanInitializeStats : public IndexScanTest {
+public:
+ void run() {
+ setup();
+
+ // Make the {x: 1} index multikey by inserting a doc where 'x' is an array.
+ insert(fromjson("{_id: 1, x: [1, 2, 3]}"));
+
+ std::auto_ptr<IndexScan> ixscan(createIndexScanSimpleRange(BSON("x" << 1), BSON("x" << 3)));
+
+ // Verify that SpecificStats of 'ixscan' have been properly initialized.
+ const IndexScanStats* stats =
+ static_cast<const IndexScanStats*>(ixscan->getSpecificStats());
+ ASSERT(stats);
+ ASSERT_TRUE(stats->isMultiKey);
+ ASSERT_EQUALS(stats->keyPattern, BSON("x" << 1));
+ }
+};
+
+// SERVER-16437
+class QueryStageIxscanInsertDuringSave : public IndexScanTest {
+public:
+ void run() {
+ setup();
+
+ insert(fromjson("{_id: 1, x: 5}"));
+ insert(fromjson("{_id: 2, x: 6}"));
+ insert(fromjson("{_id: 3, x: 12}"));
+
+ boost::scoped_ptr<IndexScan> ixscan(
+ createIndexScan(BSON("x" << 5), BSON("x" << 10), true, true));
+
+ // Expect to get key {'': 5} and then key {'': 6}.
+ WorkingSetMember* member = getNext(ixscan.get());
+ ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
+ ASSERT_EQ(member->keyData[0].keyData, BSON("" << 5));
+ member = getNext(ixscan.get());
+ ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
+ ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
+
+ // Save state and insert a few indexed docs.
+ ixscan->saveState();
+ insert(fromjson("{_id: 4, x: 10}"));
+ insert(fromjson("{_id: 5, x: 11}"));
+ ixscan->restoreState(&_txn);
+
+ // Expect EOF: we miss {'': 10} because it is inserted behind the cursor.
+ ASSERT(ixscan->isEOF());
+ WorkingSetID id;
+ ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
+ }
+};
+
+// SERVER-16437
+class QueryStageIxscanInsertDuringSaveExclusive : public IndexScanTest {
+public:
+ void run() {
+ setup();
+
+ insert(fromjson("{_id: 1, x: 5}"));
+ insert(fromjson("{_id: 2, x: 6}"));
+ insert(fromjson("{_id: 3, x: 10}"));
+
+ boost::scoped_ptr<IndexScan> ixscan(
+ createIndexScan(BSON("x" << 5), BSON("x" << 10), false, false));
+
+ // Expect to get key {'': 6}.
+ WorkingSetMember* member = getNext(ixscan.get());
+ ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
+ ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
+
+ // Save state and insert an indexed doc.
+ ixscan->saveState();
+ insert(fromjson("{_id: 4, x: 7}"));
+ ixscan->restoreState(&_txn);
+
+ // Expect EOF: we miss {'': 7} because it is inserted behind the cursor, and
+        // {'': 10} is not in the range (5, 10).
+ ASSERT(ixscan->isEOF());
+ WorkingSetID id;
+ ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
+ }
+};
+
+// SERVER-16437
+class QueryStageIxscanInsertDuringSaveExclusive2 : public IndexScanTest {
+public:
+ void run() {
+ setup();
+
+ insert(fromjson("{_id: 1, x: 5}"));
+ insert(fromjson("{_id: 2, x: 6}"));
+ insert(fromjson("{_id: 3, x: 12}"));
+
+ boost::scoped_ptr<IndexScan> ixscan(
+ createIndexScan(BSON("x" << 5), BSON("x" << 10), false, false));
+
+ // Expect to get key {'': 6}.
+ WorkingSetMember* member = getNext(ixscan.get());
+ ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
+ ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
+
+ // Save state and insert an indexed doc.
+ ixscan->saveState();
+ insert(fromjson("{_id: 4, x: 10}"));
+ ixscan->restoreState(&_txn);
+
+ // Ensure that we're EOF and we don't erroneously return {'': 12}.
+ ASSERT(ixscan->isEOF());
+ WorkingSetID id;
+ ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
+ }
+};
+
+// SERVER-16437
+class QueryStageIxscanInsertDuringSaveReverse : public IndexScanTest {
+public:
+ void run() {
+ setup();
+
+ insert(fromjson("{_id: 1, x: 10}"));
+ insert(fromjson("{_id: 2, x: 8}"));
+ insert(fromjson("{_id: 3, x: 3}"));
+
+ boost::scoped_ptr<IndexScan> ixscan(
+ createIndexScan(BSON("x" << 10), BSON("x" << 5), true, true, -1 /* reverse scan */));
+
+ // Expect to get key {'': 10} and then {'': 8}.
+ WorkingSetMember* member = getNext(ixscan.get());
+ ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
+ ASSERT_EQ(member->keyData[0].keyData, BSON("" << 10));
+ member = getNext(ixscan.get());
+ ASSERT_EQ(WorkingSetMember::LOC_AND_IDX, member->state);
+ ASSERT_EQ(member->keyData[0].keyData, BSON("" << 8));
+
+ // Save state and insert an indexed doc.
+ ixscan->saveState();
+ insert(fromjson("{_id: 4, x: 6}"));
+ ixscan->restoreState(&_txn);
+
+ // Ensure that we're EOF and we don't erroneously return {'': 6}.
+ ASSERT(ixscan->isEOF());
+ WorkingSetID id;
+ ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("query_stage_ixscan") {}
+
+ void setupTests() {
+ add<QueryStageIxscanInitializeStats>();
+ add<QueryStageIxscanInsertDuringSave>();
+ add<QueryStageIxscanInsertDuringSaveExclusive>();
+ add<QueryStageIxscanInsertDuringSaveExclusive2>();
+ add<QueryStageIxscanInsertDuringSaveReverse>();
+ }
+} QueryStageIxscanAll;
+
+} // namespace QueryStageIxscan
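
Aside: getNext() above drives the IndexScan by hand, calling work() until it
yields a result. NEED_TIME means "call work() again"; ADVANCED hands back a
WorkingSetID; IS_EOF ends the stream. A minimal, self-contained sketch of that
driver loop, using toy stand-ins (ToyStage and this three-value StageState are
illustrative only, not the MongoDB types):

    #include <cstdio>
    #include <vector>

    enum StageState { ADVANCED, IS_EOF, NEED_TIME };

    // Stand-in for a PlanStage: yields 1..3, stalling with NEED_TIME between
    // results, much like the queued-data stages these tests set up.
    struct ToyStage {
        std::vector<int> data;
        size_t pos;
        bool stall;
        ToyStage() : pos(0), stall(true) {
            for (int i = 1; i <= 3; ++i) data.push_back(i);
        }
        StageState work(int* out) {
            if (pos == data.size()) return IS_EOF;
            if (stall) { stall = false; return NEED_TIME; }
            stall = true;
            *out = data[pos++];
            return ADVANCED;
        }
    };

    int main() {
        ToyStage stage;
        int out;
        for (;;) {
            StageState state = stage.work(&out);
            if (IS_EOF == state) break;       // stream exhausted
            if (ADVANCED != state) continue;  // NEED_TIME: just call work() again
            std::printf("got %d\n", out);     // one result per ADVANCED
        }
        return 0;
    }
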
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index 5a6fb4e5f34..d7af3695117 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -51,197 +51,197 @@
namespace QueryStageKeep {
- using boost::shared_ptr;
- using std::set;
-
- class QueryStageKeepBase {
- public:
- QueryStageKeepBase() : _client(&_txn) {
-
- }
-
- virtual ~QueryStageKeepBase() {
- _client.dropCollection(ns());
+using boost::shared_ptr;
+using std::set;
+
+class QueryStageKeepBase {
+public:
+ QueryStageKeepBase() : _client(&_txn) {}
+
+ virtual ~QueryStageKeepBase() {
+ _client.dropCollection(ns());
+ }
+
+ void getLocs(set<RecordId>* out, Collection* coll) {
+ RecordIterator* it = coll->getIterator(&_txn);
+ while (!it->isEOF()) {
+ RecordId nextLoc = it->getNext();
+ out->insert(nextLoc);
}
-
- void getLocs(set<RecordId>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(&_txn);
- while (!it->isEOF()) {
- RecordId nextLoc = it->getNext();
- out->insert(nextLoc);
+ delete it;
+ }
+
+ void insert(const BSONObj& obj) {
+ _client.insert(ns(), obj);
+ }
+
+ void remove(const BSONObj& obj) {
+ _client.remove(ns(), obj);
+ }
+
+ static const char* ns() {
+ return "unittests.QueryStageKeep";
+ }
+
+ WorkingSetID getNextResult(PlanStage* stage) {
+ while (!stage->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = stage->work(&id);
+ if (PlanStage::ADVANCED == status) {
+ return id;
}
- delete it;
}
-
- void insert(const BSONObj& obj) {
- _client.insert(ns(), obj);
- }
-
- void remove(const BSONObj& obj) {
- _client.remove(ns(), obj);
+ return WorkingSet::INVALID_ID;
+ }
+
+protected:
+ OperationContextImpl _txn;
+ DBDirectClient _client;
+};
+
+
+// Test that we actually merge flagged results.
+
+//
+// Test that the flagged objects come back after all of the child's results.
+//
+class KeepStageBasic : public QueryStageKeepBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- static const char* ns() { return "unittests.QueryStageKeep"; }
+ WorkingSet ws;
- WorkingSetID getNextResult(PlanStage* stage) {
- while (!stage->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = stage->work(&id);
- if (PlanStage::ADVANCED == status) {
- return id;
- }
- }
- return WorkingSet::INVALID_ID;
+ // Add 10 objects to the collection.
+ for (size_t i = 0; i < 10; ++i) {
+ insert(BSON("x" << 1));
}
- protected:
- OperationContextImpl _txn;
- DBDirectClient _client;
- };
-
-
- // Test that we actually merge flagged results.
-
- //
- // Test that a fetch is passed up when it's not in memory.
- //
- class KeepStageBasic : public QueryStageKeepBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+ // Create 10 objects that are flagged.
+ for (size_t i = 0; i < 10; ++i) {
+ WorkingSetID id = ws.allocate();
+ WorkingSetMember* member = ws.get(id);
+ member->state = WorkingSetMember::OWNED_OBJ;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 2));
+ ws.flagForReview(id);
+ }
- WorkingSet ws;
+ // Create a collscan to provide the 10 objects in the collection.
+ CollectionScanParams params;
+ params.collection = coll;
+ params.direction = CollectionScanParams::FORWARD;
+ params.tailable = false;
+ params.start = RecordId();
+ CollectionScan* cs = new CollectionScan(&_txn, params, &ws, NULL);
- // Add 10 objects to the collection.
- for (size_t i = 0; i < 10; ++i) {
- insert(BSON("x" << 1));
- }
+ // Create a KeepMutations stage to merge in the 10 flagged objects.
+ // Takes ownership of 'cs'
+ MatchExpression* nullFilter = NULL;
+ std::auto_ptr<KeepMutationsStage> keep(new KeepMutationsStage(nullFilter, &ws, cs));
- // Create 10 objects that are flagged.
- for (size_t i = 0; i < 10; ++i) {
- WorkingSetID id = ws.allocate();
- WorkingSetMember* member = ws.get(id);
- member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 2));
- ws.flagForReview(id);
- }
+ for (size_t i = 0; i < 10; ++i) {
+ WorkingSetID id = getNextResult(keep.get());
+ WorkingSetMember* member = ws.get(id);
+ ASSERT_FALSE(ws.isFlagged(id));
+ ASSERT_EQUALS(member->obj.value()["x"].numberInt(), 1);
+ }
- // Create a collscan to provide the 10 objects in the collection.
- CollectionScanParams params;
- params.collection = coll;
- params.direction = CollectionScanParams::FORWARD;
- params.tailable = false;
- params.start = RecordId();
- CollectionScan* cs = new CollectionScan(&_txn, params, &ws, NULL);
-
- // Create a KeepMutations stage to merge in the 10 flagged objects.
- // Takes ownership of 'cs'
- MatchExpression* nullFilter = NULL;
- std::auto_ptr<KeepMutationsStage> keep(new KeepMutationsStage(nullFilter, &ws, cs));
-
- for (size_t i = 0; i < 10; ++i) {
- WorkingSetID id = getNextResult(keep.get());
- WorkingSetMember* member = ws.get(id);
- ASSERT_FALSE(ws.isFlagged(id));
- ASSERT_EQUALS(member->obj.value()["x"].numberInt(), 1);
- }
+ ASSERT(cs->isEOF());
- ASSERT(cs->isEOF());
+ // Flagged results *must* be at the end.
+ for (size_t i = 0; i < 10; ++i) {
+ WorkingSetID id = getNextResult(keep.get());
+ WorkingSetMember* member = ws.get(id);
+ ASSERT(ws.isFlagged(id));
+ ASSERT_EQUALS(member->obj.value()["x"].numberInt(), 2);
+ }
+ }
+};
- // Flagged results *must* be at the end.
- for (size_t i = 0; i < 10; ++i) {
- WorkingSetID id = getNextResult(keep.get());
- WorkingSetMember* member = ws.get(id);
- ASSERT(ws.isFlagged(id));
- ASSERT_EQUALS(member->obj.value()["x"].numberInt(), 2);
- }
+/**
+ * SERVER-15580: test that the KeepMutationsStage behaves correctly if additional results are
+ * flagged after some flagged results have already been returned.
+ */
+class KeepStageFlagAdditionalAfterStreamingStarts : public QueryStageKeepBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
+ WorkingSet ws;
+
+ std::set<WorkingSetID> expectedResultIds;
+ std::set<WorkingSetID> resultIds;
+
+ // Create a KeepMutationsStage with an EOF child, and flag 50 objects. We expect these
+ // objects to be returned by the KeepMutationsStage.
+ MatchExpression* nullFilter = NULL;
+ std::auto_ptr<KeepMutationsStage> keep(
+ new KeepMutationsStage(nullFilter, &ws, new EOFStage()));
+ for (size_t i = 0; i < 50; ++i) {
+ WorkingSetID id = ws.allocate();
+ WorkingSetMember* member = ws.get(id);
+ member->state = WorkingSetMember::OWNED_OBJ;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 1));
+ ws.flagForReview(id);
+ expectedResultIds.insert(id);
}
- };
-
- /**
- * SERVER-15580: test that the KeepMutationsStage behaves correctly if additional results are
- * flagged after some flagged results have already been returned.
- */
- class KeepStageFlagAdditionalAfterStreamingStarts : public QueryStageKeepBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
-
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- WorkingSet ws;
-
- std::set<WorkingSetID> expectedResultIds;
- std::set<WorkingSetID> resultIds;
-
- // Create a KeepMutationsStage with an EOF child, and flag 50 objects. We expect these
- // objects to be returned by the KeepMutationsStage.
- MatchExpression* nullFilter = NULL;
- std::auto_ptr<KeepMutationsStage> keep(new KeepMutationsStage(nullFilter, &ws,
- new EOFStage()));
- for (size_t i = 0; i < 50; ++i) {
- WorkingSetID id = ws.allocate();
- WorkingSetMember* member = ws.get(id);
- member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 1));
- ws.flagForReview(id);
- expectedResultIds.insert(id);
- }
- // Call work() on the KeepMutationsStage. The stage should start streaming the
- // already-flagged objects.
- WorkingSetID id = getNextResult(keep.get());
+ // Call work() on the KeepMutationsStage. The stage should start streaming the
+ // already-flagged objects.
+ WorkingSetID id = getNextResult(keep.get());
+ resultIds.insert(id);
+
+ // Flag more objects, then call work() again on the KeepMutationsStage, and expect none
+ // of the newly-flagged objects to be returned (the KeepMutationsStage does not
+ // incorporate objects flagged since the streaming phase started).
+ //
+ // This condition triggers SERVER-15580 (the new flagging causes a rehash of the
+ // unordered_set "WorkingSet::_flagged", which invalidates all iterators, which were
+ // previously being dereferenced in KeepMutationsStage::work()).
+ // Note that std::unordered_set<>::insert() triggers a rehash if the new number of
+ // elements is greater than or equal to max_load_factor()*bucket_count().
+ size_t rehashSize =
+ static_cast<size_t>(ws.getFlagged().max_load_factor() * ws.getFlagged().bucket_count());
+ while (ws.getFlagged().size() <= rehashSize) {
+ WorkingSetID id = ws.allocate();
+ WorkingSetMember* member = ws.get(id);
+ member->state = WorkingSetMember::OWNED_OBJ;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 1));
+ ws.flagForReview(id);
+ }
+ while ((id = getNextResult(keep.get())) != WorkingSet::INVALID_ID) {
resultIds.insert(id);
-
- // Flag more objects, then call work() again on the KeepMutationsStage, and expect none
- // of the newly-flagged objects to be returned (the KeepMutationsStage does not
- // incorporate objects flagged since the streaming phase started).
- //
- // This condition triggers SERVER-15580 (the new flagging causes a rehash of the
- // unordered_set "WorkingSet::_flagged", which invalidates all iterators, which were
- // previously being dereferenced in KeepMutationsStage::work()).
- // Note that std::unordered_set<>::insert() triggers a rehash if the new number of
- // elements is greater than or equal to max_load_factor()*bucket_count().
- size_t rehashSize = static_cast<size_t>(ws.getFlagged().max_load_factor() *
- ws.getFlagged().bucket_count());
- while (ws.getFlagged().size() <= rehashSize) {
- WorkingSetID id = ws.allocate();
- WorkingSetMember* member = ws.get(id);
- member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 1));
- ws.flagForReview(id);
- }
- while ((id = getNextResult(keep.get())) != WorkingSet::INVALID_ID) {
- resultIds.insert(id);
- }
-
- // Assert that only the first 50 objects were returned.
- ASSERT(expectedResultIds == resultIds);
}
- };
- class All : public Suite {
- public:
- All() : Suite( "query_stage_keep" ) { }
+ // Assert that only the first 50 objects were returned.
+ ASSERT(expectedResultIds == resultIds);
+ }
+};
- void setupTests() {
- add<KeepStageBasic>();
- add<KeepStageFlagAdditionalAfterStreamingStarts>();
- }
- };
+class All : public Suite {
+public:
+ All() : Suite("query_stage_keep") {}
+
+ void setupTests() {
+ add<KeepStageBasic>();
+ add<KeepStageFlagAdditionalAfterStreamingStarts>();
+ }
+};
- SuiteInstance<All> queryStageKeepAll;
+SuiteInstance<All> queryStageKeepAll;
} // namespace QueryStageKeep
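
The rehash condition that KeepStageFlagAdditionalAfterStreamingStarts relies on
is standard C++ behavior: std::unordered_set::insert() rehashes once the element
count would exceed max_load_factor() * bucket_count(), and a rehash invalidates
every outstanding iterator. A stand-alone sketch of the same threshold
computation the test performs (the exact growth policy is
implementation-defined):

    #include <cstdio>
    #include <unordered_set>

    int main() {
        std::unordered_set<int> s;
        // Largest size the current bucket array can hold without rehashing.
        size_t threshold =
            static_cast<size_t>(s.max_load_factor() * s.bucket_count());
        size_t bucketsBefore = s.bucket_count();

        // Inserting one element past the threshold forces a rehash, just as
        // the extra flagForReview() calls do in the test above.
        int i = 0;
        while (s.size() <= threshold) s.insert(i++);

        // The bucket array grew; iterators held across this point are invalid.
        std::printf("buckets: %zu -> %zu\n", bucketsBefore, s.bucket_count());
        return 0;
    }
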
diff --git a/src/mongo/dbtests/query_stage_limit_skip.cpp b/src/mongo/dbtests/query_stage_limit_skip.cpp
index bc4697327de..6d4a39bec26 100644
--- a/src/mongo/dbtests/query_stage_limit_skip.cpp
+++ b/src/mongo/dbtests/query_stage_limit_skip.cpp
@@ -45,68 +45,70 @@ using namespace mongo;
namespace {
- using boost::scoped_ptr;
- using std::auto_ptr;
- using std::max;
- using std::min;
-
- static const int N = 50;
-
- /* Populate a QueuedDataStage and return it. Caller owns it. */
- QueuedDataStage* getMS(WorkingSet* ws) {
- auto_ptr<QueuedDataStage> ms(new QueuedDataStage(ws));
-
- // Put N ADVANCED results into the mock stage, and some other stalling results (YIELD/TIME).
- for (int i = 0; i < N; ++i) {
- ms->pushBack(PlanStage::NEED_TIME);
- WorkingSetMember wsm;
- wsm.state = WorkingSetMember::OWNED_OBJ;
- wsm.obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << i));
- ms->pushBack(wsm);
- ms->pushBack(PlanStage::NEED_TIME);
- }
-
- return ms.release();
+using boost::scoped_ptr;
+using std::auto_ptr;
+using std::max;
+using std::min;
+
+static const int N = 50;
+
+/* Populate a QueuedDataStage and return it. Caller owns it. */
+QueuedDataStage* getMS(WorkingSet* ws) {
+ auto_ptr<QueuedDataStage> ms(new QueuedDataStage(ws));
+
+    // Put N ADVANCED results into the mock stage, interleaved with NEED_TIME stalls.
+ for (int i = 0; i < N; ++i) {
+ ms->pushBack(PlanStage::NEED_TIME);
+ WorkingSetMember wsm;
+ wsm.state = WorkingSetMember::OWNED_OBJ;
+ wsm.obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << i));
+ ms->pushBack(wsm);
+ ms->pushBack(PlanStage::NEED_TIME);
}
- int countResults(PlanStage* stage) {
- int count = 0;
- while (!stage->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = stage->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
- ++count;
+ return ms.release();
+}
+
+int countResults(PlanStage* stage) {
+ int count = 0;
+ while (!stage->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = stage->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
- return count;
+ ++count;
}
-
- //
- // Insert 50 objects. Filter/skip 0, 1, 2, ..., 100 objects and expect the right # of results.
- //
- class QueryStageLimitSkipBasicTest {
- public:
- void run() {
- for (int i = 0; i < 2 * N; ++i) {
- WorkingSet ws;
-
- scoped_ptr<PlanStage> skip(new SkipStage(i, &ws, getMS(&ws)));
- ASSERT_EQUALS(max(0, N - i), countResults(skip.get()));
-
- scoped_ptr<PlanStage> limit(new LimitStage(i, &ws, getMS(&ws)));
- ASSERT_EQUALS(min(N, i), countResults(limit.get()));
- }
+ return count;
+}
+
+//
+// Insert 50 objects. Skip/limit 0, 1, 2, ..., 99 objects and expect the right # of results.
+//
+class QueryStageLimitSkipBasicTest {
+public:
+ void run() {
+ for (int i = 0; i < 2 * N; ++i) {
+ WorkingSet ws;
+
+ scoped_ptr<PlanStage> skip(new SkipStage(i, &ws, getMS(&ws)));
+ ASSERT_EQUALS(max(0, N - i), countResults(skip.get()));
+
+ scoped_ptr<PlanStage> limit(new LimitStage(i, &ws, getMS(&ws)));
+ ASSERT_EQUALS(min(N, i), countResults(limit.get()));
}
- };
+ }
+};
- class All : public Suite {
- public:
- All() : Suite( "query_stage_limit_skip" ) { }
+class All : public Suite {
+public:
+ All() : Suite("query_stage_limit_skip") {}
- void setupTests() {
- add<QueryStageLimitSkipBasicTest>();
- }
- };
+ void setupTests() {
+ add<QueryStageLimitSkipBasicTest>();
+ }
+};
- SuiteInstance<All> queryStageLimitSkipAll;
+SuiteInstance<All> queryStageLimitSkipAll;
} // namespace
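
The expected counts in QueryStageLimitSkipBasicTest are plain arithmetic:
skipping i of N results leaves max(0, N - i), and limiting to i returns
min(N, i). For example, with N = 50, skip 20 leaves 30 results and limit 20
returns 20. A stand-alone check of those identities:

    #include <algorithm>
    #include <cassert>

    int main() {
        const int N = 50;  // results produced by the child stage
        for (int i = 0; i < 2 * N; ++i) {
            // skip i: drop the first i results; nothing is left once i >= N.
            int afterSkip = (i < N) ? (N - i) : 0;
            assert(afterSkip == std::max(0, N - i));

            // limit i: keep at most i results; capped at the N available.
            int afterLimit = (i < N) ? i : N;
            assert(afterLimit == std::min(N, i));
        }
        return 0;
    }
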
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index e2b0d9e44fb..12f8c701344 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -45,543 +45,569 @@
namespace QueryStageMergeSortTests {
- using std::auto_ptr;
- using std::set;
- using std::string;
-
- class QueryStageMergeSortTestBase {
- public:
- QueryStageMergeSortTestBase() : _client(&_txn) {
-
+using std::auto_ptr;
+using std::set;
+using std::string;
+
+class QueryStageMergeSortTestBase {
+public:
+ QueryStageMergeSortTestBase() : _client(&_txn) {}
+
+ virtual ~QueryStageMergeSortTestBase() {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
+ }
+
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
+
+ IndexDescriptor* getIndex(const BSONObj& obj, Collection* coll) {
+ return coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, obj);
+ }
+
+ void insert(const BSONObj& obj) {
+ _client.insert(ns(), obj);
+ }
+
+ void remove(const BSONObj& obj) {
+ _client.remove(ns(), obj);
+ }
+
+ void getLocs(set<RecordId>* out, Collection* coll) {
+ RecordIterator* it = coll->getIterator(&_txn);
+ while (!it->isEOF()) {
+ RecordId nextLoc = it->getNext();
+ out->insert(nextLoc);
}
-
- virtual ~QueryStageMergeSortTestBase() {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
+ delete it;
+ }
+
+ BSONObj objWithMinKey(int start) {
+ BSONObjBuilder startKeyBob;
+ startKeyBob.append("", start);
+ startKeyBob.appendMinKey("");
+ return startKeyBob.obj();
+ }
+
+ BSONObj objWithMaxKey(int start) {
+ BSONObjBuilder endKeyBob;
+ endKeyBob.append("", start);
+ endKeyBob.appendMaxKey("");
+ return endKeyBob.obj();
+ }
+
+ static const char* ns() {
+ return "unittests.QueryStageMergeSort";
+ }
+
+protected:
+ OperationContextImpl _txn;
+
+private:
+ DBDirectClient _client;
+};
+
+// SERVER-1205:
+// find($or[{a:1}, {b:1}]).sort({c:1}) with indices {a:1, c:1} and {b:1, c:1}.
+class QueryStageMergeSortPrefixIndex : public QueryStageMergeSortTestBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
- }
+ const int N = 50;
- IndexDescriptor* getIndex(const BSONObj& obj, Collection* coll) {
- return coll->getIndexCatalog()->findIndexByKeyPattern( &_txn, obj );
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 1 << "c" << i));
+ insert(BSON("b" << 1 << "c" << i));
}
- void insert(const BSONObj& obj) {
- _client.insert(ns(), obj);
+ BSONObj firstIndex = BSON("a" << 1 << "c" << 1);
+ BSONObj secondIndex = BSON("b" << 1 << "c" << 1);
+
+ addIndex(firstIndex);
+ addIndex(secondIndex);
+
+ WorkingSet* ws = new WorkingSet();
+ // Sort by c:1
+ MergeSortStageParams msparams;
+ msparams.pattern = BSON("c" << 1);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+
+ // a:1
+ IndexScanParams params;
+ params.descriptor = getIndex(firstIndex, coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = objWithMinKey(1);
+ params.bounds.endKey = objWithMaxKey(1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+ // b:1
+ params.descriptor = getIndex(secondIndex, coll);
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+ // Must fetch if we want to easily pull out an obj.
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(&_txn,
+ ws,
+ new FetchStage(&_txn, ws, ms, NULL, coll),
+ coll,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ for (int i = 0; i < N; ++i) {
+ BSONObj first, second;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
+ ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
+ ASSERT_EQUALS(i, first["c"].numberInt());
+ ASSERT((first.hasField("a") && second.hasField("b")) ||
+ (first.hasField("b") && second.hasField("a")));
}
- void remove(const BSONObj& obj) {
- _client.remove(ns(), obj);
+ // Should be done now.
+ BSONObj foo;
+ ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, NULL));
+ }
+};
+
+// Each inserted document appears in both indices but is deduped and returned once, in sorted order.
+class QueryStageMergeSortDups : public QueryStageMergeSortTestBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- void getLocs(set<RecordId>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(&_txn);
- while (!it->isEOF()) {
- RecordId nextLoc = it->getNext();
- out->insert(nextLoc);
- }
- delete it;
- }
+ const int N = 50;
- BSONObj objWithMinKey(int start) {
- BSONObjBuilder startKeyBob;
- startKeyBob.append("", start);
- startKeyBob.appendMinKey("");
- return startKeyBob.obj();
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 1 << "b" << 1 << "c" << i));
+ insert(BSON("a" << 1 << "b" << 1 << "c" << i));
}
- BSONObj objWithMaxKey(int start) {
- BSONObjBuilder endKeyBob;
- endKeyBob.append("", start);
- endKeyBob.appendMaxKey("");
- return endKeyBob.obj();
+ BSONObj firstIndex = BSON("a" << 1 << "c" << 1);
+ BSONObj secondIndex = BSON("b" << 1 << "c" << 1);
+
+ addIndex(firstIndex);
+ addIndex(secondIndex);
+
+ WorkingSet* ws = new WorkingSet();
+ // Sort by c:1
+ MergeSortStageParams msparams;
+ msparams.pattern = BSON("c" << 1);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+
+ // a:1
+ IndexScanParams params;
+ params.descriptor = getIndex(firstIndex, coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = objWithMinKey(1);
+ params.bounds.endKey = objWithMaxKey(1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+ // b:1
+ params.descriptor = getIndex(secondIndex, coll);
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(&_txn,
+ ws,
+ new FetchStage(&_txn, ws, ms, NULL, coll),
+ coll,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ for (int i = 0; i < N; ++i) {
+ BSONObj first, second;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
+ ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
+ ASSERT_EQUALS(i, first["c"].numberInt());
+ ASSERT((first.hasField("a") && second.hasField("b")) ||
+ (first.hasField("b") && second.hasField("a")));
}
- static const char* ns() { return "unittests.QueryStageMergeSort"; }
-
- protected:
- OperationContextImpl _txn;
-
- private:
- DBDirectClient _client;
- };
-
- // SERVER-1205:
- // find($or[{a:1}, {b:1}]).sort({c:1}) with indices {a:1, c:1} and {b:1, c:1}.
- class QueryStageMergeSortPrefixIndex : public QueryStageMergeSortTestBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
-
- const int N = 50;
-
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1 << "c" << i));
- insert(BSON("b" << 1 << "c" << i));
- }
-
- BSONObj firstIndex = BSON("a" << 1 << "c" << 1);
- BSONObj secondIndex = BSON("b" << 1 << "c" << 1);
-
- addIndex(firstIndex);
- addIndex(secondIndex);
-
- WorkingSet* ws = new WorkingSet();
- // Sort by c:1
- MergeSortStageParams msparams;
- msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
-
- // a:1
- IndexScanParams params;
- params.descriptor = getIndex(firstIndex, coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = objWithMinKey(1);
- params.bounds.endKey = objWithMaxKey(1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
-
- // b:1
- params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
-
- // Must fetch if we want to easily pull out an obj.
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws, new FetchStage(&_txn, ws, ms, NULL, coll),
- coll, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- for (int i = 0; i < N; ++i) {
- BSONObj first, second;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
- ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
- ASSERT_EQUALS(i, first["c"].numberInt());
- ASSERT((first.hasField("a") && second.hasField("b"))
- || (first.hasField("b") && second.hasField("a")));
- }
-
- // Should be done now.
- BSONObj foo;
- ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, NULL));
+ // Should be done now.
+ BSONObj foo;
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ }
+};
+
+// Each inserted document appears in both indices; with deduping disabled, we get each result twice.
+class QueryStageMergeSortDupsNoDedup : public QueryStageMergeSortTestBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // Each inserted document appears in both indices but is deduped and returned properly/sorted.
- class QueryStageMergeSortDups : public QueryStageMergeSortTestBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
-
- const int N = 50;
-
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1 << "b" << 1 << "c" << i));
- insert(BSON("a" << 1 << "b" << 1 << "c" << i));
- }
-
- BSONObj firstIndex = BSON("a" << 1 << "c" << 1);
- BSONObj secondIndex = BSON("b" << 1 << "c" << 1);
-
- addIndex(firstIndex);
- addIndex(secondIndex);
-
- WorkingSet* ws = new WorkingSet();
- // Sort by c:1
- MergeSortStageParams msparams;
- msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
-
- // a:1
- IndexScanParams params;
- params.descriptor = getIndex(firstIndex, coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = objWithMinKey(1);
- params.bounds.endKey = objWithMaxKey(1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
- // b:1
- params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws, new FetchStage(&_txn, ws, ms, NULL, coll),
- coll, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- for (int i = 0; i < N; ++i) {
- BSONObj first, second;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
- ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
- ASSERT_EQUALS(i, first["c"].numberInt());
- ASSERT((first.hasField("a") && second.hasField("b"))
- || (first.hasField("b") && second.hasField("a")));
- }
+ const int N = 50;
- // Should be done now.
- BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 1 << "b" << 1 << "c" << i));
}
- };
-
- // Each inserted document appears in both indices, no deduping, get each result twice.
- class QueryStageMergeSortDupsNoDedup : public QueryStageMergeSortTestBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
-
- const int N = 50;
-
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1 << "b" << 1 << "c" << i));
- }
-
- BSONObj firstIndex = BSON("a" << 1 << "c" << 1);
- BSONObj secondIndex = BSON("b" << 1 << "c" << 1);
-
- addIndex(firstIndex);
- addIndex(secondIndex);
-
- WorkingSet* ws = new WorkingSet();
- // Sort by c:1
- MergeSortStageParams msparams;
- msparams.dedup = false;
- msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
-
- // a:1
- IndexScanParams params;
- params.descriptor = getIndex(firstIndex, coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = objWithMinKey(1);
- params.bounds.endKey = objWithMaxKey(1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
-
- // b:1
- params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws, new FetchStage(&_txn, ws, ms, NULL, coll),
- coll, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- for (int i = 0; i < N; ++i) {
- BSONObj first, second;
- // We inserted N objects but we get 2 * N from the runner because of dups.
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
- ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
- ASSERT_EQUALS(i, first["c"].numberInt());
- ASSERT((first.hasField("a") && second.hasField("b"))
- || (first.hasField("b") && second.hasField("a")));
- }
+ BSONObj firstIndex = BSON("a" << 1 << "c" << 1);
+ BSONObj secondIndex = BSON("b" << 1 << "c" << 1);
+
+ addIndex(firstIndex);
+ addIndex(secondIndex);
+
+ WorkingSet* ws = new WorkingSet();
+ // Sort by c:1
+ MergeSortStageParams msparams;
+ msparams.dedup = false;
+ msparams.pattern = BSON("c" << 1);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+
+ // a:1
+ IndexScanParams params;
+ params.descriptor = getIndex(firstIndex, coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = objWithMinKey(1);
+ params.bounds.endKey = objWithMaxKey(1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+ // b:1
+ params.descriptor = getIndex(secondIndex, coll);
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(&_txn,
+ ws,
+ new FetchStage(&_txn, ws, ms, NULL, coll),
+ coll,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ for (int i = 0; i < N; ++i) {
+ BSONObj first, second;
+ // We inserted N objects but we get 2 * N from the runner because of dups.
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
+ ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
+ ASSERT_EQUALS(i, first["c"].numberInt());
+ ASSERT((first.hasField("a") && second.hasField("b")) ||
+ (first.hasField("b") && second.hasField("a")));
+ }
- // Should be done now.
- BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ // Should be done now.
+ BSONObj foo;
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ }
+};
+
+// Descending indices merge correctly. Same as the test above, but with c descending.
+class QueryStageMergeSortPrefixIndexReverse : public QueryStageMergeSortTestBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // Decreasing indices merged ok. Basically the test above but decreasing.
- class QueryStageMergeSortPrefixIndexReverse : public QueryStageMergeSortTestBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- const int N = 50;
+ const int N = 50;
- for (int i = 0; i < N; ++i) {
- // We insert a:1 c:i for i=0..49 but in reverse order for the heck of it.
- insert(BSON("a" << 1 << "c" << N - i - 1));
- insert(BSON("b" << 1 << "c" << i));
- }
+ for (int i = 0; i < N; ++i) {
+ // We insert a:1 c:i for i=0..49 but in reverse order for the heck of it.
+ insert(BSON("a" << 1 << "c" << N - i - 1));
+ insert(BSON("b" << 1 << "c" << i));
+ }
- BSONObj firstIndex = BSON("a" << 1 << "c" << -1);
- BSONObj secondIndex = BSON("b" << 1 << "c" << -1);
-
- addIndex(firstIndex);
- addIndex(secondIndex);
-
- WorkingSet* ws = new WorkingSet();
- // Sort by c:-1
- MergeSortStageParams msparams;
- msparams.pattern = BSON("c" << -1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
-
- // a:1
- IndexScanParams params;
- params.descriptor = getIndex(firstIndex, coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = objWithMaxKey(1);
- params.bounds.endKey = objWithMinKey(1);
- params.bounds.endKeyInclusive = true;
- // This is the direction along the index.
- params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ BSONObj firstIndex = BSON("a" << 1 << "c" << -1);
+ BSONObj secondIndex = BSON("b" << 1 << "c" << -1);
+
+ addIndex(firstIndex);
+ addIndex(secondIndex);
+
+ WorkingSet* ws = new WorkingSet();
+ // Sort by c:-1
+ MergeSortStageParams msparams;
+ msparams.pattern = BSON("c" << -1);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+
+ // a:1
+ IndexScanParams params;
+ params.descriptor = getIndex(firstIndex, coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = objWithMaxKey(1);
+ params.bounds.endKey = objWithMinKey(1);
+ params.bounds.endKeyInclusive = true;
+ // This is the direction along the index.
+ params.direction = 1;
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+ // b:1
+ params.descriptor = getIndex(secondIndex, coll);
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(&_txn,
+ ws,
+ new FetchStage(&_txn, ws, ms, NULL, coll),
+ coll,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ for (int i = 0; i < N; ++i) {
+ BSONObj first, second;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
+ ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
+ ASSERT_EQUALS(N - i - 1, first["c"].numberInt());
+ ASSERT((first.hasField("a") && second.hasField("b")) ||
+ (first.hasField("b") && second.hasField("a")));
+ }
- // b:1
- params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ // Should be done now.
+ BSONObj foo;
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ }
+};
+
+// One stage EOF immediately
+class QueryStageMergeSortOneStageEOF : public QueryStageMergeSortTestBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws, new FetchStage(&_txn, ws, ms, NULL, coll),
- coll, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- for (int i = 0; i < N; ++i) {
- BSONObj first, second;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
- ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
- ASSERT_EQUALS(N - i - 1, first["c"].numberInt());
- ASSERT((first.hasField("a") && second.hasField("b"))
- || (first.hasField("b") && second.hasField("a")));
- }
+ const int N = 50;
- // Should be done now.
- BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("a" << 1 << "c" << i));
+ insert(BSON("b" << 1 << "c" << i));
}
- };
-
- // One stage EOF immediately
- class QueryStageMergeSortOneStageEOF : public QueryStageMergeSortTestBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
-
- const int N = 50;
- for (int i = 0; i < N; ++i) {
- insert(BSON("a" << 1 << "c" << i));
- insert(BSON("b" << 1 << "c" << i));
- }
+ BSONObj firstIndex = BSON("a" << 1 << "c" << 1);
+ BSONObj secondIndex = BSON("b" << 1 << "c" << 1);
+
+ addIndex(firstIndex);
+ addIndex(secondIndex);
+
+ WorkingSet* ws = new WorkingSet();
+ // Sort by c:1
+ MergeSortStageParams msparams;
+ msparams.pattern = BSON("c" << 1);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+
+ // a:1
+ IndexScanParams params;
+ params.descriptor = getIndex(firstIndex, coll);
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = objWithMinKey(1);
+ params.bounds.endKey = objWithMaxKey(1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+        // b:51: matches nothing, so this child scan is EOF immediately.
+ params.descriptor = getIndex(secondIndex, coll);
+ params.bounds.startKey = BSON("" << 51 << "" << MinKey);
+ params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(&_txn,
+ ws,
+ new FetchStage(&_txn, ws, ms, NULL, coll),
+ coll,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ // Only getting results from the a:1 index scan.
+ for (int i = 0; i < N; ++i) {
+ BSONObj obj;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
+ ASSERT_EQUALS(i, obj["c"].numberInt());
+ ASSERT_EQUALS(1, obj["a"].numberInt());
+ }
- BSONObj firstIndex = BSON("a" << 1 << "c" << 1);
- BSONObj secondIndex = BSON("b" << 1 << "c" << 1);
-
- addIndex(firstIndex);
- addIndex(secondIndex);
-
- WorkingSet* ws = new WorkingSet();
- // Sort by c:1
- MergeSortStageParams msparams;
- msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
-
- // a:1
- IndexScanParams params;
- params.descriptor = getIndex(firstIndex, coll);
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = objWithMinKey(1);
- params.bounds.endKey = objWithMaxKey(1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ // Should be done now.
+ BSONObj foo;
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ }
+};
+
+// N stages each have 1 result
+class QueryStageMergeSortManyShort : public QueryStageMergeSortTestBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- // b:51 (EOF)
- params.descriptor = getIndex(secondIndex, coll);
- params.bounds.startKey = BSON("" << 51 << "" << MinKey);
- params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
+ WorkingSet* ws = new WorkingSet();
+ // Sort by foo:1
+ MergeSortStageParams msparams;
+ msparams.pattern = BSON("foo" << 1);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+
+ IndexScanParams params;
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = objWithMinKey(1);
+ params.bounds.endKey = objWithMaxKey(1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+
+ int numIndices = 20;
+ for (int i = 0; i < numIndices; ++i) {
+ // 'a', 'b', ...
+ string index(1, 'a' + i);
+ insert(BSON(index << 1 << "foo" << i));
+
+ BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
+ addIndex(indexSpec);
+ params.descriptor = getIndex(indexSpec, coll);
ms->addChild(new IndexScan(&_txn, params, ws, NULL));
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws, new FetchStage(&_txn, ws, ms, NULL, coll),
- coll, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- // Only getting results from the a:1 index scan.
- for (int i = 0; i < N; ++i) {
- BSONObj obj;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
- ASSERT_EQUALS(i, obj["c"].numberInt());
- ASSERT_EQUALS(1, obj["a"].numberInt());
- }
-
- // Should be done now.
- BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
}
- };
-
- // N stages each have 1 result
- class QueryStageMergeSortManyShort : public QueryStageMergeSortTestBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- WorkingSet* ws = new WorkingSet();
- // Sort by foo:1
- MergeSortStageParams msparams;
- msparams.pattern = BSON("foo" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
-
- IndexScanParams params;
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = objWithMinKey(1);
- params.bounds.endKey = objWithMaxKey(1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
-
- int numIndices = 20;
- for (int i = 0; i < numIndices; ++i) {
- // 'a', 'b', ...
- string index(1, 'a' + i);
- insert(BSON(index << 1 << "foo" << i));
-
- BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
- addIndex(indexSpec);
- params.descriptor = getIndex(indexSpec, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
- }
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn, ws, new FetchStage(&_txn, ws, ms, NULL, coll),
- coll, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- for (int i = 0; i < numIndices; ++i) {
- BSONObj obj;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
- ASSERT_EQUALS(i, obj["foo"].numberInt());
- string index(1, 'a' + i);
- ASSERT_EQUALS(1, obj[index].numberInt());
- }
-
- // Should be done now.
- BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(&_txn,
+ ws,
+ new FetchStage(&_txn, ws, ms, NULL, coll),
+ coll,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ for (int i = 0; i < numIndices; ++i) {
+ BSONObj obj;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
+ string index(1, 'a' + i);
+ ASSERT_EQUALS(1, obj[index].numberInt());
}
- };
-
- // Invalidation mid-run
- class QueryStageMergeSortInvalidation : public QueryStageMergeSortTestBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
- WorkingSet ws;
- // Sort by foo:1
- MergeSortStageParams msparams;
- msparams.pattern = BSON("foo" << 1);
- auto_ptr<MergeSortStage> ms(new MergeSortStage(msparams, &ws, coll));
-
- IndexScanParams params;
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = objWithMinKey(1);
- params.bounds.endKey = objWithMaxKey(1);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
-
- // Index 'a'+i has foo equal to 'i'.
-
- int numIndices = 20;
- for (int i = 0; i < numIndices; ++i) {
- // 'a', 'b', ...
- string index(1, 'a' + i);
- insert(BSON(index << 1 << "foo" << i));
-
- BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
- addIndex(indexSpec);
- params.descriptor = getIndex(indexSpec, coll);
- ms->addChild(new IndexScan(&_txn, params, &ws, NULL));
- }
+ // Should be done now.
+ BSONObj foo;
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ }
+};
+
+// Invalidation mid-run
+class QueryStageMergeSortInvalidation : public QueryStageMergeSortTestBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- set<RecordId> locs;
- getLocs(&locs, coll);
+ WorkingSet ws;
+ // Sort by foo:1
+ MergeSortStageParams msparams;
+ msparams.pattern = BSON("foo" << 1);
+ auto_ptr<MergeSortStage> ms(new MergeSortStage(msparams, &ws, coll));
+
+ IndexScanParams params;
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = objWithMinKey(1);
+ params.bounds.endKey = objWithMaxKey(1);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+
+    // The document indexed under field ('a' + i) has foo equal to i.
+
+ int numIndices = 20;
+ for (int i = 0; i < numIndices; ++i) {
+ // 'a', 'b', ...
+ string index(1, 'a' + i);
+ insert(BSON(index << 1 << "foo" << i));
+
+ BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
+ addIndex(indexSpec);
+ params.descriptor = getIndex(indexSpec, coll);
+ ms->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ }
- set<RecordId>::iterator it = locs.begin();
+ set<RecordId> locs;
+ getLocs(&locs, coll);
- // Get 10 results. Should be getting results in order of 'locs'.
- int count = 0;
- while (!ms->isEOF() && count < 10) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ms->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
+ set<RecordId>::iterator it = locs.begin();
- WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(member->loc, *it);
- BSONElement elt;
- string index(1, 'a' + count);
- ASSERT(member->getFieldDotted(index, &elt));
- ASSERT_EQUALS(1, elt.numberInt());
- ASSERT(member->getFieldDotted("foo", &elt));
- ASSERT_EQUALS(count, elt.numberInt());
- ++count;
- ++it;
+ // Get 10 results. Should be getting results in order of 'locs'.
+ int count = 0;
+ while (!ms->isEOF() && count < 10) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ms->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
- // Invalidate locs[11]. Should force a fetch. We don't get it back.
- ms->saveState();
- ms->invalidate(&_txn, *it, INVALIDATION_DELETION);
- ms->restoreState(&_txn);
+ WorkingSetMember* member = ws.get(id);
+ ASSERT_EQUALS(member->loc, *it);
+ BSONElement elt;
+ string index(1, 'a' + count);
+ ASSERT(member->getFieldDotted(index, &elt));
+ ASSERT_EQUALS(1, elt.numberInt());
+ ASSERT(member->getFieldDotted("foo", &elt));
+ ASSERT_EQUALS(count, elt.numberInt());
+ ++count;
+ ++it;
+ }
+
+ // Invalidate locs[11]. Should force a fetch. We don't get it back.
+ ms->saveState();
+ ms->invalidate(&_txn, *it, INVALIDATION_DELETION);
+ ms->restoreState(&_txn);
- // Make sure locs[11] was fetched for us.
- {
+ // Make sure locs[11] was fetched for us.
+ {
// TODO: If we have "return upon invalidation" ever triggerable, do the following test.
/*
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -601,46 +627,47 @@ namespace QueryStageMergeSortTests {
ASSERT_EQUALS(count, elt.numberInt());
*/
- ++it;
- ++count;
- }
-
- // And get the rest.
- while (!ms->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ms->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
+ ++it;
+ ++count;
+ }
- WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(member->loc, *it);
- BSONElement elt;
- string index(1, 'a' + count);
- ASSERT_TRUE(member->getFieldDotted(index, &elt));
- ASSERT_EQUALS(1, elt.numberInt());
- ASSERT(member->getFieldDotted("foo", &elt));
- ASSERT_EQUALS(count, elt.numberInt());
- ++count;
- ++it;
+ // And get the rest.
+ while (!ms->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ms->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "query_stage_merge_sort_test" ) { }
-
- void setupTests() {
- add<QueryStageMergeSortPrefixIndex>();
- add<QueryStageMergeSortDups>();
- add<QueryStageMergeSortDupsNoDedup>();
- add<QueryStageMergeSortPrefixIndexReverse>();
- add<QueryStageMergeSortOneStageEOF>();
- add<QueryStageMergeSortManyShort>();
- add<QueryStageMergeSortInvalidation>();
- }
- };
- SuiteInstance<All> queryStageMergeSortTest;
+ WorkingSetMember* member = ws.get(id);
+ ASSERT_EQUALS(member->loc, *it);
+ BSONElement elt;
+ string index(1, 'a' + count);
+ ASSERT_TRUE(member->getFieldDotted(index, &elt));
+ ASSERT_EQUALS(1, elt.numberInt());
+ ASSERT(member->getFieldDotted("foo", &elt));
+ ASSERT_EQUALS(count, elt.numberInt());
+ ++count;
+ ++it;
+ }
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("query_stage_merge_sort_test") {}
+
+ void setupTests() {
+ add<QueryStageMergeSortPrefixIndex>();
+ add<QueryStageMergeSortDups>();
+ add<QueryStageMergeSortDupsNoDedup>();
+ add<QueryStageMergeSortPrefixIndexReverse>();
+ add<QueryStageMergeSortOneStageEOF>();
+ add<QueryStageMergeSortManyShort>();
+ add<QueryStageMergeSortInvalidation>();
+ }
+};
+
+SuiteInstance<All> queryStageMergeSortTest;
} // namespace
-
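
A note on the bounds trick shared by these merge-sort tests: objWithMinKey(1)
and objWithMaxKey(1) build the simple range [{'': 1, '': MinKey}, {'': 1,
'': MaxKey}]. Over a compound index such as {a: 1, c: 1}, that range covers
every key whose first field equals 1 while leaving the second field
unconstrained, so a single index scan streams all of the a:1 documents in c
order. A sketch of those builders in isolation (assumes the same mongo headers
this test file already includes):

    #include "mongo/db/jsobj.h"

    // Lower bound {'': 1, '': MinKey}: first index field pinned to 1, second
    // field open from below.
    mongo::BSONObj prefixOneLowerBound() {
        mongo::BSONObjBuilder bob;
        bob.append("", 1);
        bob.appendMinKey("");
        return bob.obj();
    }

    // Upper bound {'': 1, '': MaxKey}: first index field pinned to 1, second
    // field open from above.
    mongo::BSONObj prefixOneUpperBound() {
        mongo::BSONObjBuilder bob;
        bob.append("", 1);
        bob.appendMaxKey("");
        return bob.obj();
    }
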
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index f5b9bd9f434..d81ee8f0598 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -39,240 +39,222 @@
namespace {
- using namespace mongo;
- using boost::shared_ptr;
- using std::vector;
-
- /**
- * Stage which takes in an array of BSONObjs and returns them.
- * If the BSONObj is in the form of a Status, returns the Status as a FAILURE.
- */
- class MockStage : public PlanStage {
- public:
-
- MockStage(const vector<BSONObj>& data, WorkingSet* workingSet) :
- _data(data), _pos(0), _workingSet(workingSet), _stats("MOCK_STAGE") {
- }
-
- virtual ~MockStage() {
- }
-
- virtual StageState work(WorkingSetID* out) {
- ++_stats.works;
-
- if (isEOF())
- return PlanStage::IS_EOF;
-
- BSONObj next = _data[_pos++];
-
- if (WorkingSetCommon::isValidStatusMemberObject(next)) {
- Status status = WorkingSetCommon::getMemberObjectStatus(next);
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::FAILURE;
- }
+using namespace mongo;
+using boost::shared_ptr;
+using std::vector;
- *out = _workingSet->allocate();
- WorkingSetMember* member = _workingSet->get(*out);
- member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = Snapshotted<BSONObj>(SnapshotId(), next);
-
- return PlanStage::ADVANCED;
- }
-
- virtual bool isEOF() {
- return _pos == static_cast<int>(_data.size());
- }
-
- virtual void saveState() {
- }
+/**
+ * Stage which takes in an array of BSONObjs and returns them.
+ * If the BSONObj is in the form of a Status, returns the Status as a FAILURE.
+ */
+class MockStage : public PlanStage {
+public:
+ MockStage(const vector<BSONObj>& data, WorkingSet* workingSet)
+ : _data(data), _pos(0), _workingSet(workingSet), _stats("MOCK_STAGE") {}
- virtual void restoreState(OperationContext* opCtx) {
- }
+ virtual ~MockStage() {}
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- }
- virtual vector<PlanStage*> getChildren() const {
- return vector<PlanStage*>();
- }
+ virtual StageState work(WorkingSetID* out) {
+ ++_stats.works;
- virtual StageType stageType() const {
- return STAGE_UNKNOWN;
- }
+ if (isEOF())
+ return PlanStage::IS_EOF;
- virtual PlanStageStats* getStats() {
- return new PlanStageStats(_stats, STAGE_UNKNOWN);
- }
+ BSONObj next = _data[_pos++];
- virtual const CommonStats* getCommonStats() {
- return &_stats;
+ if (WorkingSetCommon::isValidStatusMemberObject(next)) {
+ Status status = WorkingSetCommon::getMemberObjectStatus(next);
+ *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
+ return PlanStage::FAILURE;
}
- virtual const SpecificStats* getSpecificStats() {
- return NULL;
- }
+ *out = _workingSet->allocate();
+ WorkingSetMember* member = _workingSet->get(*out);
+ member->state = WorkingSetMember::OWNED_OBJ;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), next);
- private:
+ return PlanStage::ADVANCED;
+ }
- vector<BSONObj> _data;
- int _pos;
+ virtual bool isEOF() {
+ return _pos == static_cast<int>(_data.size());
+ }
- // Not owned here
- WorkingSet* const _workingSet;
+ virtual void saveState() {}
- CommonStats _stats;
- };
+ virtual void restoreState(OperationContext* opCtx) {}
- /**
- * Stage which implements a basic distance search, and interprets the "distance" field of
- * fetched documents as the distance.
- */
- class MockNearStage : public NearStage {
- public:
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {}
+ virtual vector<PlanStage*> getChildren() const {
+ return vector<PlanStage*>();
+ }
- struct MockInterval {
+ virtual StageType stageType() const {
+ return STAGE_UNKNOWN;
+ }
- MockInterval(const vector<BSONObj>& data, double min, double max) :
- data(data), min(min), max(max) {
- }
+ virtual PlanStageStats* getStats() {
+ return new PlanStageStats(_stats, STAGE_UNKNOWN);
+ }
- vector<BSONObj> data;
- double min;
- double max;
- };
+ virtual const CommonStats* getCommonStats() {
+ return &_stats;
+ }
- MockNearStage(WorkingSet* workingSet) :
- NearStage(NULL, workingSet, NULL,
- new PlanStageStats(CommonStats("MOCK_DISTANCE_SEARCH_STAGE"), STAGE_UNKNOWN)),
- _pos(0) {
- }
+ virtual const SpecificStats* getSpecificStats() {
+ return NULL;
+ }
- virtual ~MockNearStage() {
- }
+private:
+ vector<BSONObj> _data;
+ int _pos;
- void addInterval(vector<BSONObj> data, double min, double max) {
- _intervals.mutableVector().push_back(new MockInterval(data, min, max));
- }
+ // Not owned here
+ WorkingSet* const _workingSet;
- virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection) {
+ CommonStats _stats;
+};
- if (_pos == static_cast<int>(_intervals.size()))
- return StatusWith<CoveredInterval*>(NULL);
+/**
+ * Stage which implements a basic distance search, and interprets the "distance" field of
+ * fetched documents as the distance.
+ */
+class MockNearStage : public NearStage {
+public:
+ struct MockInterval {
+ MockInterval(const vector<BSONObj>& data, double min, double max)
+ : data(data), min(min), max(max) {}
+
+ vector<BSONObj> data;
+ double min;
+ double max;
+ };
- const MockInterval& interval = *_intervals.vector()[_pos++];
+ MockNearStage(WorkingSet* workingSet)
+ : NearStage(NULL,
+ workingSet,
+ NULL,
+ new PlanStageStats(CommonStats("MOCK_DISTANCE_SEARCH_STAGE"), STAGE_UNKNOWN)),
+ _pos(0) {}
- bool lastInterval = _pos == static_cast<int>(_intervals.vector().size());
- return StatusWith<CoveredInterval*>(new CoveredInterval(new MockStage(interval.data,
- workingSet),
- true,
- interval.min,
- interval.max,
- lastInterval));
- }
+ virtual ~MockNearStage() {}
- virtual StatusWith<double> computeDistance(WorkingSetMember* member) {
- ASSERT(member->hasObj());
- return StatusWith<double>(member->obj.value()["distance"].numberDouble());
- }
+ void addInterval(vector<BSONObj> data, double min, double max) {
+ _intervals.mutableVector().push_back(new MockInterval(data, min, max));
+ }
- virtual void finishSaveState() { }
+ virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection) {
+ if (_pos == static_cast<int>(_intervals.size()))
+ return StatusWith<CoveredInterval*>(NULL);
+
+ const MockInterval& interval = *_intervals.vector()[_pos++];
+
+ bool lastInterval = _pos == static_cast<int>(_intervals.vector().size());
+ return StatusWith<CoveredInterval*>(
+ new CoveredInterval(new MockStage(interval.data, workingSet),
+ true,
+ interval.min,
+ interval.max,
+ lastInterval));
+ }
- virtual void finishRestoreState(OperationContext* txn) { }
+ virtual StatusWith<double> computeDistance(WorkingSetMember* member) {
+ ASSERT(member->hasObj());
+ return StatusWith<double>(member->obj.value()["distance"].numberDouble());
+ }
- virtual void finishInvalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) { }
+ virtual void finishSaveState() {}
- private:
+ virtual void finishRestoreState(OperationContext* txn) {}
- OwnedPointerVector<MockInterval> _intervals;
- int _pos;
- };
+ virtual void finishInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {}
- static vector<BSONObj> advanceStage(PlanStage* stage, WorkingSet* workingSet) {
+private:
+ OwnedPointerVector<MockInterval> _intervals;
+ int _pos;
+};
- vector<BSONObj> results;
+static vector<BSONObj> advanceStage(PlanStage* stage, WorkingSet* workingSet) {
+ vector<BSONObj> results;
- WorkingSetID nextMemberID;
- PlanStage::StageState state = PlanStage::NEED_TIME;
+ WorkingSetID nextMemberID;
+ PlanStage::StageState state = PlanStage::NEED_TIME;
- while (PlanStage::NEED_TIME == state) {
- while (PlanStage::ADVANCED == (state = stage->work(&nextMemberID))) {
- results.push_back(workingSet->get(nextMemberID)->obj.value());
- }
+ while (PlanStage::NEED_TIME == state) {
+ while (PlanStage::ADVANCED == (state = stage->work(&nextMemberID))) {
+ results.push_back(workingSet->get(nextMemberID)->obj.value());
}
-
- return results;
}
- static void assertAscendingAndValid(const vector<BSONObj>& results) {
- double lastDistance = -1.0;
- for (vector<BSONObj>::const_iterator it = results.begin(); it != results.end(); ++it) {
- double distance = (*it)["distance"].numberDouble();
- bool shouldInclude = (*it)["$included"].eoo() || (*it)["$included"].trueValue();
- ASSERT(shouldInclude);
- ASSERT_GREATER_THAN_OR_EQUALS(distance, lastDistance);
- lastDistance = distance;
- }
- }
+ return results;
+}
- TEST(query_stage_near, Basic) {
-
- vector<BSONObj> mockData;
- WorkingSet workingSet;
-
- MockNearStage nearStage(&workingSet);
-
- // First set of results
- mockData.clear();
- mockData.push_back(BSON("distance" << 0.5));
- mockData.push_back(BSON("distance" << 2.0 << "$included" << false)); // Not included
- mockData.push_back(BSON("distance" << 0.0));
- nearStage.addInterval(mockData, 0.0, 1.0);
-
- // Second set of results
- mockData.clear();
- mockData.push_back(BSON("distance" << 1.5));
- mockData.push_back(BSON("distance" << 2.0 << "$included" << false)); // Not included
- mockData.push_back(BSON("distance" << 1.0));
- nearStage.addInterval(mockData, 1.0, 2.0);
-
- // Last set of results
- mockData.clear();
- mockData.push_back(BSON("distance" << 2.5));
- mockData.push_back(BSON("distance" << 3.0)); // Included
- mockData.push_back(BSON("distance" << 2.0));
- nearStage.addInterval(mockData, 2.0, 3.0);
-
- vector<BSONObj> results = advanceStage(&nearStage, &workingSet);
- ASSERT_EQUALS(results.size(), 7u);
- assertAscendingAndValid(results);
+static void assertAscendingAndValid(const vector<BSONObj>& results) {
+ double lastDistance = -1.0;
+ for (vector<BSONObj>::const_iterator it = results.begin(); it != results.end(); ++it) {
+ double distance = (*it)["distance"].numberDouble();
+ bool shouldInclude = (*it)["$included"].eoo() || (*it)["$included"].trueValue();
+ ASSERT(shouldInclude);
+ ASSERT_GREATER_THAN_OR_EQUALS(distance, lastDistance);
+ lastDistance = distance;
}
+}
- TEST(query_stage_near, EmptyResults) {
-
- vector<BSONObj> mockData;
- WorkingSet workingSet;
-
- MockNearStage nearStage(&workingSet);
-
- // Empty set of results
- mockData.clear();
- nearStage.addInterval(mockData, 0.0, 1.0);
+TEST(query_stage_near, Basic) {
+ vector<BSONObj> mockData;
+ WorkingSet workingSet;
+
+ MockNearStage nearStage(&workingSet);
+
+ // First set of results
+ mockData.clear();
+ mockData.push_back(BSON("distance" << 0.5));
+ mockData.push_back(BSON("distance" << 2.0 << "$included" << false)); // Not included
+ mockData.push_back(BSON("distance" << 0.0));
+ nearStage.addInterval(mockData, 0.0, 1.0);
+
+ // Second set of results
+ mockData.clear();
+ mockData.push_back(BSON("distance" << 1.5));
+ mockData.push_back(BSON("distance" << 2.0 << "$included" << false)); // Not included
+ mockData.push_back(BSON("distance" << 1.0));
+ nearStage.addInterval(mockData, 1.0, 2.0);
+
+ // Last set of results
+ mockData.clear();
+ mockData.push_back(BSON("distance" << 2.5));
+ mockData.push_back(BSON("distance" << 3.0)); // Included
+ mockData.push_back(BSON("distance" << 2.0));
+ nearStage.addInterval(mockData, 2.0, 3.0);
+
+ vector<BSONObj> results = advanceStage(&nearStage, &workingSet);
+ ASSERT_EQUALS(results.size(), 7u);
+ assertAscendingAndValid(results);
+}
- // Non-empty sest of results
- mockData.clear();
- mockData.push_back(BSON("distance" << 1.5));
- mockData.push_back(BSON("distance" << 2.0));
- mockData.push_back(BSON("distance" << 1.0));
- nearStage.addInterval(mockData, 1.0, 2.0);
+TEST(query_stage_near, EmptyResults) {
+ vector<BSONObj> mockData;
+ WorkingSet workingSet;
- vector<BSONObj> results = advanceStage(&nearStage, &workingSet);
- ASSERT_EQUALS(results.size(), 3u);
- assertAscendingAndValid(results);
- }
+ MockNearStage nearStage(&workingSet);
+ // Empty set of results
+ mockData.clear();
+ nearStage.addInterval(mockData, 0.0, 1.0);
+ // Non-empty sest of results
+ mockData.clear();
+ mockData.push_back(BSON("distance" << 1.5));
+ mockData.push_back(BSON("distance" << 2.0));
+ mockData.push_back(BSON("distance" << 1.0));
+ nearStage.addInterval(mockData, 1.0, 2.0);
+ vector<BSONObj> results = advanceStage(&nearStage, &workingSet);
+ ASSERT_EQUALS(results.size(), 3u);
+ assertAscendingAndValid(results);
+}
}
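
The MockNearStage above pins down the ordering contract these tests assert: each
covered interval is exhausted, its results buffered and sorted by distance, and
only then is the next interval opened, so the concatenated stream is globally
ascending whenever the intervals' distance ranges do not overlap. A minimal
standalone sketch of that buffer-and-flush behavior (plain C++ with no MongoDB
types; every name below is illustrative, not the real NearStage API):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

struct Doc {
    double distance;
    bool included;  // false models documents the stage filters out
};

// Filter each interval, sort its buffer by distance, append it to the output.
std::vector<Doc> searchNear(std::vector<std::vector<Doc>> intervals) {
    std::vector<Doc> out;
    for (auto& interval : intervals) {
        interval.erase(std::remove_if(interval.begin(),
                                      interval.end(),
                                      [](const Doc& d) { return !d.included; }),
                       interval.end());
        std::sort(interval.begin(), interval.end(), [](const Doc& a, const Doc& b) {
            return a.distance < b.distance;
        });
        out.insert(out.end(), interval.begin(), interval.end());
    }
    return out;
}

int main() {
    // Mirrors the Basic test above: nine documents, two excluded, seven returned.
    std::vector<Doc> results = searchNear({{{0.5, true}, {2.0, false}, {0.0, true}},
                                           {{1.5, true}, {2.0, false}, {1.0, true}},
                                           {{2.5, true}, {3.0, true}, {2.0, true}}});
    assert(results.size() == 7);
    for (std::size_t i = 1; i < results.size(); ++i)
        assert(results[i - 1].distance <= results[i].distance);
    return 0;
}
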
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index dc3d109386e..6d26cb744ed 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -45,379 +45,391 @@
namespace QueryStageSortTests {
- using std::auto_ptr;
- using std::set;
+using std::auto_ptr;
+using std::set;
- class QueryStageSortTestBase {
- public:
- QueryStageSortTestBase() : _client(&_txn) {
-
- }
+class QueryStageSortTestBase {
+public:
+ QueryStageSortTestBase() : _client(&_txn) {}
- void fillData() {
- for (int i = 0; i < numObj(); ++i) {
- insert(BSON("foo" << i));
- }
+ void fillData() {
+ for (int i = 0; i < numObj(); ++i) {
+ insert(BSON("foo" << i));
}
+ }
- virtual ~QueryStageSortTestBase() {
- _client.dropCollection(ns());
- }
+ virtual ~QueryStageSortTestBase() {
+ _client.dropCollection(ns());
+ }
- void insert(const BSONObj& obj) {
- _client.insert(ns(), obj);
- }
+ void insert(const BSONObj& obj) {
+ _client.insert(ns(), obj);
+ }
- void getLocs(set<RecordId>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(&_txn);
- while (!it->isEOF()) {
- RecordId nextLoc = it->getNext();
- out->insert(nextLoc);
- }
- delete it;
+ void getLocs(set<RecordId>* out, Collection* coll) {
+ RecordIterator* it = coll->getIterator(&_txn);
+ while (!it->isEOF()) {
+ RecordId nextLoc = it->getNext();
+ out->insert(nextLoc);
}
-
- /**
- * We feed a mix of (key, unowned, owned) data to the sort stage.
- */
- void insertVarietyOfObjects(QueuedDataStage* ms, Collection* coll) {
- set<RecordId> locs;
- getLocs(&locs, coll);
-
- set<RecordId>::iterator it = locs.begin();
-
- for (int i = 0; i < numObj(); ++i, ++it) {
- ASSERT_FALSE(it == locs.end());
-
- // Insert some owned obj data.
- WorkingSetMember member;
- member.loc = *it;
- member.state = WorkingSetMember::LOC_AND_OBJ;
- member.obj = coll->docFor(&_txn, *it);
- ms->pushBack(member);
- }
+ delete it;
+ }
+
+ /**
+ * We feed a mix of (key, unowned, owned) data to the sort stage.
+ */
+ void insertVarietyOfObjects(QueuedDataStage* ms, Collection* coll) {
+ set<RecordId> locs;
+ getLocs(&locs, coll);
+
+ set<RecordId>::iterator it = locs.begin();
+
+ for (int i = 0; i < numObj(); ++i, ++it) {
+ ASSERT_FALSE(it == locs.end());
+
+ // Insert some owned obj data.
+ WorkingSetMember member;
+ member.loc = *it;
+ member.state = WorkingSetMember::LOC_AND_OBJ;
+ member.obj = coll->docFor(&_txn, *it);
+ ms->pushBack(member);
}
-
- // Return a value in the set {-1, 0, 1} to represent the sign of parameter i. Used to
- // normalize woCompare calls.
- int sgn(int i) {
- if (i == 0)
- return 0;
- return i > 0 ? 1 : -1;
- }
-
- /**
- * A template used by many tests below.
- * Fill out numObj objects, sort them in the order provided by 'direction'.
- * If extAllowed is true, sorting will use use external sorting if available.
- * If limit is not zero, we limit the output of the sort stage to 'limit' results.
- */
- void sortAndCheck(int direction, Collection* coll) {
- WorkingSet* ws = new WorkingSet();
- QueuedDataStage* ms = new QueuedDataStage(ws);
-
- // Insert a mix of the various types of data.
- insertVarietyOfObjects(ms, coll);
-
- SortStageParams params;
- params.collection = coll;
- params.pattern = BSON("foo" << direction);
- params.limit = limit();
-
- // Must fetch so we can look at the doc as a BSONObj.
- PlanExecutor* rawExec;
- Status status =
- PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn, ws,
- new SortStage(params, ws, ms), NULL, coll),
- coll, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- // Look at pairs of objects to make sure that the sort order is pairwise (and therefore
- // totally) correct.
- BSONObj last;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&last, NULL));
-
- // Count 'last'.
- int count = 1;
-
- BSONObj current;
- while (PlanExecutor::ADVANCED == exec->getNext(&current, NULL)) {
- int cmp = sgn(current.woSortOrder(last, params.pattern));
- // The next object should be equal to the previous or oriented according to the sort
- // pattern.
- ASSERT(cmp == 0 || cmp == 1);
- ++count;
- last = current;
- }
-
- checkCount(count);
+ }
+
+ // Return a value in the set {-1, 0, 1} to represent the sign of parameter i. Used to
+ // normalize woCompare calls.
+ int sgn(int i) {
+ if (i == 0)
+ return 0;
+ return i > 0 ? 1 : -1;
+ }
+
+ /**
+ * A template used by many tests below.
+ * Fill out numObj objects, sort them in the order provided by 'direction'.
+ * If extAllowed is true, sorting will use external sorting if available.
+ * If limit is not zero, we limit the output of the sort stage to 'limit' results.
+ */
+ void sortAndCheck(int direction, Collection* coll) {
+ WorkingSet* ws = new WorkingSet();
+ QueuedDataStage* ms = new QueuedDataStage(ws);
+
+ // Insert a mix of the various types of data.
+ insertVarietyOfObjects(ms, coll);
+
+ SortStageParams params;
+ params.collection = coll;
+ params.pattern = BSON("foo" << direction);
+ params.limit = limit();
+
+ // Must fetch so we can look at the doc as a BSONObj.
+ PlanExecutor* rawExec;
+ Status status =
+ PlanExecutor::make(&_txn,
+ ws,
+ new FetchStage(&_txn, ws, new SortStage(params, ws, ms), NULL, coll),
+ coll,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ // Look at pairs of objects to make sure that the sort order is pairwise (and therefore
+ // totally) correct.
+ BSONObj last;
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&last, NULL));
+
+ // Count 'last'.
+ int count = 1;
+
+ BSONObj current;
+ while (PlanExecutor::ADVANCED == exec->getNext(&current, NULL)) {
+ int cmp = sgn(current.woSortOrder(last, params.pattern));
+ // The next object should be equal to the previous or oriented according to the sort
+ // pattern.
+ ASSERT(cmp == 0 || cmp == 1);
+ ++count;
+ last = current;
}
- /**
- * Check number of results returned from sort.
- */
- void checkCount(int count) {
- // No limit, should get all objects back.
- // Otherwise, result set should be smaller of limit and input data size.
- if (limit() > 0 && limit() < numObj()) {
- ASSERT_EQUALS(limit(), count);
- }
- else {
- ASSERT_EQUALS(numObj(), count);
- }
+ checkCount(count);
+ }
+
+ /**
+ * Check number of results returned from sort.
+ */
+ void checkCount(int count) {
+ // No limit, should get all objects back.
+        // Otherwise, the result set should be the smaller of the limit and the input data size.
+ if (limit() > 0 && limit() < numObj()) {
+ ASSERT_EQUALS(limit(), count);
+ } else {
+ ASSERT_EQUALS(numObj(), count);
}
+ }
- virtual int numObj() = 0;
-
- // Returns sort limit
- // Leave as 0 to disable limit.
- virtual int limit() const { return 0; };
-
-
- static const char* ns() { return "unittests.QueryStageSort"; }
+ virtual int numObj() = 0;
- protected:
- OperationContextImpl _txn;
- DBDirectClient _client;
+ // Returns sort limit
+ // Leave as 0 to disable limit.
+ virtual int limit() const {
+ return 0;
};
- // Sort some small # of results in increasing order.
- class QueryStageSortInc: public QueryStageSortTestBase {
- public:
- virtual int numObj() { return 100; }
+ static const char* ns() {
+ return "unittests.QueryStageSort";
+ }
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+protected:
+ OperationContextImpl _txn;
+ DBDirectClient _client;
+};
- fillData();
- sortAndCheck(1, coll);
- }
- };
- // Sort some small # of results in decreasing order.
- class QueryStageSortDec : public QueryStageSortTestBase {
- public:
- virtual int numObj() { return 100; }
-
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+// Sort some small # of results in increasing order.
+class QueryStageSortInc : public QueryStageSortTestBase {
+public:
+ virtual int numObj() {
+ return 100;
+ }
- fillData();
- sortAndCheck(-1, coll);
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
- // Sort in descreasing order with limit applied
- template <int LIMIT>
- class QueryStageSortDecWithLimit : public QueryStageSortDec {
- public:
- virtual int limit() const {
- return LIMIT;
+ fillData();
+ sortAndCheck(1, coll);
+ }
+};
+
+// Sort some small # of results in decreasing order.
+class QueryStageSortDec : public QueryStageSortTestBase {
+public:
+ virtual int numObj() {
+ return 100;
+ }
+
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
- // Sort a big bunch of objects.
- class QueryStageSortExt : public QueryStageSortTestBase {
- public:
- virtual int numObj() { return 10000; }
-
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+ fillData();
+ sortAndCheck(-1, coll);
+ }
+};
+
+// Sort in decreasing order with limit applied
+template <int LIMIT>
+class QueryStageSortDecWithLimit : public QueryStageSortDec {
+public:
+ virtual int limit() const {
+ return LIMIT;
+ }
+};
+
+// Sort a big bunch of objects.
+class QueryStageSortExt : public QueryStageSortTestBase {
+public:
+ virtual int numObj() {
+ return 10000;
+ }
+
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
+ }
- fillData();
- sortAndCheck(-1, coll);
+ fillData();
+ sortAndCheck(-1, coll);
+ }
+};
+
+// Invalidation of everything fed to sort.
+class QueryStageSortInvalidation : public QueryStageSortTestBase {
+public:
+ virtual int numObj() {
+ return 2000;
+ }
+
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
- // Invalidation of everything fed to sort.
- class QueryStageSortInvalidation : public QueryStageSortTestBase {
- public:
- virtual int numObj() { return 2000; }
-
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
+ fillData();
- fillData();
+ // The data we're going to later invalidate.
+ set<RecordId> locs;
+ getLocs(&locs, coll);
- // The data we're going to later invalidate.
- set<RecordId> locs;
- getLocs(&locs, coll);
+ // Build the mock scan stage which feeds the data.
+ WorkingSet ws;
+ auto_ptr<QueuedDataStage> ms(new QueuedDataStage(&ws));
+ insertVarietyOfObjects(ms.get(), coll);
- // Build the mock scan stage which feeds the data.
- WorkingSet ws;
- auto_ptr<QueuedDataStage> ms(new QueuedDataStage(&ws));
- insertVarietyOfObjects(ms.get(), coll);
+ SortStageParams params;
+ params.collection = coll;
+ params.pattern = BSON("foo" << 1);
+ params.limit = limit();
+ auto_ptr<SortStage> ss(new SortStage(params, &ws, ms.get()));
- SortStageParams params;
- params.collection = coll;
- params.pattern = BSON("foo" << 1);
- params.limit = limit();
- auto_ptr<SortStage> ss(new SortStage(params, &ws, ms.get()));
+ const int firstRead = 10;
- const int firstRead = 10;
+ // Have sort read in data from the mock stage.
+ for (int i = 0; i < firstRead; ++i) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ss->work(&id);
+ ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
+ }
- // Have sort read in data from the mock stage.
- for (int i = 0; i < firstRead; ++i) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ss->work(&id);
- ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
- }
+ // We should have read in the first 'firstRead' locs. Invalidate the first.
+ ss->saveState();
+ set<RecordId>::iterator it = locs.begin();
+ ss->invalidate(&_txn, *it++, INVALIDATION_DELETION);
+ ss->restoreState(&_txn);
- // We should have read in the first 'firstRead' locs. Invalidate the first.
- ss->saveState();
- set<RecordId>::iterator it = locs.begin();
- ss->invalidate(&_txn, *it++, INVALIDATION_DELETION);
- ss->restoreState(&_txn);
-
- // Read the rest of the data from the mock stage.
- while (!ms->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- ss->work(&id);
- }
+ // Read the rest of the data from the mock stage.
+ while (!ms->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ ss->work(&id);
+ }
- // Release to prevent double-deletion.
- ms.release();
+ // Release to prevent double-deletion.
+ ms.release();
- // Let's just invalidate everything now.
- ss->saveState();
- while (it != locs.end()) {
- ss->invalidate(&_txn, *it++, INVALIDATION_DELETION);
- }
- ss->restoreState(&_txn);
-
- // Invalidation of data in the sort stage fetches it but passes it through.
- int count = 0;
- while (!ss->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState status = ss->work(&id);
- if (PlanStage::ADVANCED != status) { continue; }
- WorkingSetMember* member = ws.get(id);
- ASSERT(member->hasObj());
- ASSERT(!member->hasLoc());
- ++count;
+ // Let's just invalidate everything now.
+ ss->saveState();
+ while (it != locs.end()) {
+ ss->invalidate(&_txn, *it++, INVALIDATION_DELETION);
+ }
+ ss->restoreState(&_txn);
+
+ // Invalidation of data in the sort stage fetches it but passes it through.
+ int count = 0;
+ while (!ss->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState status = ss->work(&id);
+ if (PlanStage::ADVANCED != status) {
+ continue;
}
-
- // Returns all docs.
- ASSERT_EQUALS(limit() ? limit() : numObj(), count);
+ WorkingSetMember* member = ws.get(id);
+ ASSERT(member->hasObj());
+ ASSERT(!member->hasLoc());
+ ++count;
}
- };
- // Invalidation of everything fed to sort with limit enabled.
- // Limit size of working set within sort stage to a small number
- // Sort stage implementation should not try to invalidate DiskLocc that
- // are no longer in the working set.
- template<int LIMIT>
- class QueryStageSortInvalidationWithLimit : public QueryStageSortInvalidation {
- public:
- virtual int limit() const {
- return LIMIT;
+ // Returns all docs.
+ ASSERT_EQUALS(limit() ? limit() : numObj(), count);
+ }
+};
+
+// Invalidation of everything fed to sort with limit enabled.
+// Limit the size of the working set within the sort stage to a small number.
+// The sort stage implementation should not try to invalidate DiskLocs that
+// are no longer in the working set.
+template <int LIMIT>
+class QueryStageSortInvalidationWithLimit : public QueryStageSortInvalidation {
+public:
+ virtual int limit() const {
+ return LIMIT;
+ }
+};
+
+// Should error out if we sort with parallel arrays.
+class QueryStageSortParallelArrays : public QueryStageSortTestBase {
+public:
+ virtual int numObj() {
+ return 100;
+ }
+
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wuow(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wuow.commit();
}
- };
-
- // Should error out if we sort with parallel arrays.
- class QueryStageSortParallelArrays : public QueryStageSortTestBase {
- public:
- virtual int numObj() { return 100; }
-
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
- if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
- wuow.commit();
- }
-
- WorkingSet* ws = new WorkingSet();
- QueuedDataStage* ms = new QueuedDataStage(ws);
- for (int i = 0; i < numObj(); ++i) {
- WorkingSetMember member;
- member.state = WorkingSetMember::OWNED_OBJ;
+ WorkingSet* ws = new WorkingSet();
+ QueuedDataStage* ms = new QueuedDataStage(ws);
- member.obj = Snapshotted<BSONObj>(SnapshotId(),
- fromjson("{a: [1,2,3], b:[1,2,3], c:[1,2,3], d:[1,2,3,4]}"));
- ms->pushBack(member);
+ for (int i = 0; i < numObj(); ++i) {
+ WorkingSetMember member;
+ member.state = WorkingSetMember::OWNED_OBJ;
- member.obj = Snapshotted<BSONObj>(SnapshotId(),
- fromjson("{a:1, b:1, c:1}"));
- ms->pushBack(member);
- }
-
- SortStageParams params;
- params.collection = coll;
- params.pattern = BSON("b" << -1 << "c" << 1 << "a" << 1);
- params.limit = 0;
-
- // We don't get results back since we're sorting some parallel arrays.
- PlanExecutor* rawExec;
- Status status =
- PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn,
- ws,
- new SortStage(params, ws, ms), NULL, coll),
- coll, PlanExecutor::YIELD_MANUAL, &rawExec);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- PlanExecutor::ExecState runnerState = exec->getNext(NULL, NULL);
- ASSERT_EQUALS(PlanExecutor::FAILURE, runnerState);
- }
- };
+ member.obj = Snapshotted<BSONObj>(
+ SnapshotId(), fromjson("{a: [1,2,3], b:[1,2,3], c:[1,2,3], d:[1,2,3,4]}"));
+ ms->pushBack(member);
- class All : public Suite {
- public:
- All() : Suite( "query_stage_sort_test" ) { }
-
- void setupTests() {
- add<QueryStageSortInc>();
- add<QueryStageSortDec>();
- // Sort with limit has a general limiting strategy for limit > 1
- add<QueryStageSortDecWithLimit<10> >();
- // and a special case for limit == 1
- add<QueryStageSortDecWithLimit<1> >();
- add<QueryStageSortExt>();
- add<QueryStageSortInvalidation>();
- add<QueryStageSortInvalidationWithLimit<10> >();
- add<QueryStageSortInvalidationWithLimit<1> >();
- add<QueryStageSortParallelArrays>();
+ member.obj = Snapshotted<BSONObj>(SnapshotId(), fromjson("{a:1, b:1, c:1}"));
+ ms->pushBack(member);
}
- };
- SuiteInstance<All> queryStageSortTest;
+ SortStageParams params;
+ params.collection = coll;
+ params.pattern = BSON("b" << -1 << "c" << 1 << "a" << 1);
+ params.limit = 0;
+
+ // We don't get results back since we're sorting some parallel arrays.
+ PlanExecutor* rawExec;
+ Status status =
+ PlanExecutor::make(&_txn,
+ ws,
+ new FetchStage(&_txn, ws, new SortStage(params, ws, ms), NULL, coll),
+ coll,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ PlanExecutor::ExecState runnerState = exec->getNext(NULL, NULL);
+ ASSERT_EQUALS(PlanExecutor::FAILURE, runnerState);
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("query_stage_sort_test") {}
+
+ void setupTests() {
+ add<QueryStageSortInc>();
+ add<QueryStageSortDec>();
+ // Sort with limit has a general limiting strategy for limit > 1
+ add<QueryStageSortDecWithLimit<10>>();
+ // and a special case for limit == 1
+ add<QueryStageSortDecWithLimit<1>>();
+ add<QueryStageSortExt>();
+ add<QueryStageSortInvalidation>();
+ add<QueryStageSortInvalidationWithLimit<10>>();
+ add<QueryStageSortInvalidationWithLimit<1>>();
+ add<QueryStageSortParallelArrays>();
+ }
+};
+
+SuiteInstance<All> queryStageSortTest;
} // namespace
-
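
sortAndCheck() above verifies the sort order pairwise rather than comparing all
pairs; adjacent checks suffice because a valid ordering is transitive. A
standalone sketch of the same idea, assuming an ordinary three-way comparator
in place of woSortOrder (names here are illustrative only):

#include <cassert>
#include <cstddef>
#include <vector>

// Normalize any three-way comparison result to {-1, 0, 1}, like sgn() above.
int sgn(int i) {
    return (i > 0) - (i < 0);
}

// Each adjacent pair must compare equal (0) or ascending (-1); a strictly
// greater predecessor (+1) means the sequence is out of order.
template <typename T, typename Cmp>
bool isSorted(const std::vector<T>& results, Cmp cmp) {
    for (std::size_t i = 1; i < results.size(); ++i) {
        if (sgn(cmp(results[i - 1], results[i])) == 1)
            return false;
    }
    return true;
}

int main() {
    auto threeWay = [](int a, int b) { return a - b; };
    assert(isSorted(std::vector<int>{1, 2, 2, 5}, threeWay));
    assert(!isSorted(std::vector<int>{1, 3, 2}, threeWay));
    return 0;
}
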
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 309e7516952..d7cf869f026 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -39,132 +39,137 @@
namespace QueryStageSubplan {
- class QueryStageSubplanBase {
- public:
- QueryStageSubplanBase()
- : _client(&_txn) { }
-
- virtual ~QueryStageSubplanBase() {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
- }
-
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
- }
+class QueryStageSubplanBase {
+public:
+ QueryStageSubplanBase() : _client(&_txn) {}
- void insert(const BSONObj& doc) {
- _client.insert(ns(), doc);
- }
+ virtual ~QueryStageSubplanBase() {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
+ }
- static const char* ns() { return "unittests.QueryStageSubplan"; }
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
- protected:
- OperationContextImpl _txn;
+ void insert(const BSONObj& doc) {
+ _client.insert(ns(), doc);
+ }
- private:
- DBDirectClient _client;
- };
+ static const char* ns() {
+ return "unittests.QueryStageSubplan";
+ }
- /**
- * SERVER-15012: test that the subplan stage does not crash when the winning solution
- * for an $or clause uses a '2d' index. We don't produce cache data for '2d'. The subplanner
- * should gracefully fail after finding that no cache data is available, allowing us to fall
- * back to regular planning.
- */
- class QueryStageSubplanGeo2dOr : public QueryStageSubplanBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
- addIndex(BSON("a" << "2d" << "b" << 1));
- addIndex(BSON("a" << "2d"));
+protected:
+ OperationContextImpl _txn;
- BSONObj query = fromjson("{$or: [{a: {$geoWithin: {$centerSphere: [[0,0],10]}}},"
- "{a: {$geoWithin: {$centerSphere: [[1,1],10]}}}]}");
+private:
+ DBDirectClient _client;
+};
- CanonicalQuery* rawCq;
- ASSERT_OK(CanonicalQuery::canonicalize(ns(), query, &rawCq));
- boost::scoped_ptr<CanonicalQuery> cq(rawCq);
-
- Collection* collection = ctx.getCollection();
+/**
+ * SERVER-15012: test that the subplan stage does not crash when the winning solution
+ * for an $or clause uses a '2d' index. We don't produce cache data for '2d'. The subplanner
+ * should gracefully fail after finding that no cache data is available, allowing us to fall
+ * back to regular planning.
+ */
+class QueryStageSubplanGeo2dOr : public QueryStageSubplanBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+ addIndex(BSON("a"
+ << "2d"
+ << "b" << 1));
+ addIndex(BSON("a"
+ << "2d"));
+
+ BSONObj query = fromjson(
+ "{$or: [{a: {$geoWithin: {$centerSphere: [[0,0],10]}}},"
+ "{a: {$geoWithin: {$centerSphere: [[1,1],10]}}}]}");
+
+ CanonicalQuery* rawCq;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns(), query, &rawCq));
+ boost::scoped_ptr<CanonicalQuery> cq(rawCq);
+
+ Collection* collection = ctx.getCollection();
+
+ // Get planner params.
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+
+ WorkingSet ws;
+ boost::scoped_ptr<SubplanStage> subplan(
+ new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+
+ // NULL means that 'subplan' will not yield during plan selection. Plan selection
+ // should succeed due to falling back on regular planning.
+ ASSERT_OK(subplan->pickBestPlan(NULL));
+ }
+};
- // Get planner params.
- QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+/**
+ * Test the SubplanStage's ability to plan an individual branch using the plan cache.
+ */
+class QueryStageSubplanPlanFromCache : public QueryStageSubplanBase {
+public:
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
- WorkingSet ws;
- boost::scoped_ptr<SubplanStage> subplan(new SubplanStage(&_txn, collection, &ws,
- plannerParams, cq.get()));
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("a" << 1 << "c" << 1));
- // NULL means that 'subplan' will not yield during plan selection. Plan selection
- // should succeed due to falling back on regular planning.
- ASSERT_OK(subplan->pickBestPlan(NULL));
+ for (int i = 0; i < 10; i++) {
+ insert(BSON("a" << 1 << "b" << i << "c" << i));
}
- };
- /**
- * Test the SubplanStage's ability to plan an individual branch using the plan cache.
- */
- class QueryStageSubplanPlanFromCache : public QueryStageSubplanBase {
- public:
- void run() {
- Client::WriteContext ctx(&_txn, ns());
+ // This query should result in a plan cache entry for the first branch. The second
+ // branch should tie, meaning that nothing is inserted into the plan cache.
+ BSONObj query = fromjson("{$or: [{a: 1, b: 3}, {a: 1}]}");
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("a" << 1 << "c" << 1));
+ Collection* collection = ctx.getCollection();
- for (int i = 0; i < 10; i++) {
- insert(BSON("a" << 1 << "b" << i << "c" << i));
- }
+ CanonicalQuery* rawCq;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns(), query, &rawCq));
+ boost::scoped_ptr<CanonicalQuery> cq(rawCq);
- // This query should result in a plan cache entry for the first branch. The second
- // branch should tie, meaning that nothing is inserted into the plan cache.
- BSONObj query = fromjson("{$or: [{a: 1, b: 3}, {a: 1}]}");
+ // Get planner params.
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
- Collection* collection = ctx.getCollection();
+ WorkingSet ws;
+ boost::scoped_ptr<SubplanStage> subplan(
+ new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
- CanonicalQuery* rawCq;
- ASSERT_OK(CanonicalQuery::canonicalize(ns(), query, &rawCq));
- boost::scoped_ptr<CanonicalQuery> cq(rawCq);
+ // NULL means that 'subplan' should not yield during plan selection.
+ ASSERT_OK(subplan->pickBestPlan(NULL));
- // Get planner params.
- QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ // Nothing is in the cache yet, so neither branch should have been planned from
+ // the plan cache.
+ ASSERT_FALSE(subplan->branchPlannedFromCache(0));
+ ASSERT_FALSE(subplan->branchPlannedFromCache(1));
- WorkingSet ws;
- boost::scoped_ptr<SubplanStage> subplan(new SubplanStage(&_txn, collection, &ws,
- plannerParams, cq.get()));
+ // If we repeat the same query, then the first branch should come from the cache,
+ // but the second is re-planned due to tying on the first run.
+ ws.clear();
+ subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
- // NULL means that 'subplan' should not yield during plan selection.
- ASSERT_OK(subplan->pickBestPlan(NULL));
+ ASSERT_OK(subplan->pickBestPlan(NULL));
- // Nothing is in the cache yet, so neither branch should have been planned from
- // the plan cache.
- ASSERT_FALSE(subplan->branchPlannedFromCache(0));
- ASSERT_FALSE(subplan->branchPlannedFromCache(1));
+ ASSERT_TRUE(subplan->branchPlannedFromCache(0));
+ ASSERT_FALSE(subplan->branchPlannedFromCache(1));
+ }
+};
- // If we repeat the same query, then the first branch should come from the cache,
- // but the second is re-planned due to tying on the first run.
- ws.clear();
- subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+class All : public Suite {
+public:
+ All() : Suite("query_stage_subplan") {}
- ASSERT_OK(subplan->pickBestPlan(NULL));
-
- ASSERT_TRUE(subplan->branchPlannedFromCache(0));
- ASSERT_FALSE(subplan->branchPlannedFromCache(1));
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite("query_stage_subplan") {}
-
- void setupTests() {
- add<QueryStageSubplanGeo2dOr>();
- add<QueryStageSubplanPlanFromCache>();
- }
- };
+ void setupTests() {
+ add<QueryStageSubplanGeo2dOr>();
+ add<QueryStageSubplanPlanFromCache>();
+ }
+};
- SuiteInstance<All> all;
+SuiteInstance<All> all;
-} // namespace QueryStageSubplan
+} // namespace QueryStageSubplan
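
The two subplan tests encode a caching policy: each $or branch is planned
independently, a branch whose runoff produces a clear winner gets a cache entry
keyed by its shape, and a tie leaves the cache untouched so the branch is
replanned on the next run. A rough standalone sketch of that policy under
hypothetical types (the real SubplanStage and plan-cache interfaces differ):

#include <cassert>
#include <map>
#include <string>

struct PlanChoice {
    std::string plan;
    bool fromCache;
};

class BranchPlanner {
public:
    // 'shape' keys the cache, 'winner' is what the multi-plan runoff picked,
    // and 'tie' means no clear winner emerged from the runoff.
    PlanChoice plan(const std::string& shape, const std::string& winner, bool tie) {
        std::map<std::string, std::string>::const_iterator it = _cache.find(shape);
        if (it != _cache.end())
            return {it->second, true};  // planned from the cache
        if (!tie)
            _cache[shape] = winner;  // a tie inserts nothing
        return {winner, false};
    }

private:
    std::map<std::string, std::string> _cache;
};

int main() {
    BranchPlanner planner;
    // First run: the cache is empty, so neither branch is planned from it.
    PlanChoice b0 = planner.plan("{a,b}", "ixscan_a_b", /*tie=*/false);
    PlanChoice b1 = planner.plan("{a}", "ixscan_a_b", /*tie=*/true);
    assert(!b0.fromCache && !b1.fromCache);
    // Second run: branch 0 hits the cache; the tied branch replans.
    b0 = planner.plan("{a,b}", "ixscan_a_b", false);
    b1 = planner.plan("{a}", "ixscan_a_b", true);
    assert(b0.fromCache && !b1.fromCache);
    return 0;
}
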
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 71490621109..ea9253bf77f 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -44,192 +44,196 @@
namespace QueryStageTests {
- using std::auto_ptr;
-
- class IndexScanBase {
- public:
- IndexScanBase() : _client(&_txn) {
- Client::WriteContext ctx(&_txn, ns());
-
- for (int i = 0; i < numObj(); ++i) {
- BSONObjBuilder bob;
- bob.append("foo", i);
- bob.append("baz", i);
- bob.append("bar", numObj() - i);
- _client.insert(ns(), bob.obj());
- }
-
- addIndex(BSON("foo" << 1));
- addIndex(BSON("foo" << 1 << "baz" << 1));
+using std::auto_ptr;
+
+class IndexScanBase {
+public:
+ IndexScanBase() : _client(&_txn) {
+ Client::WriteContext ctx(&_txn, ns());
+
+ for (int i = 0; i < numObj(); ++i) {
+ BSONObjBuilder bob;
+ bob.append("foo", i);
+ bob.append("baz", i);
+ bob.append("bar", numObj() - i);
+ _client.insert(ns(), bob.obj());
}
- virtual ~IndexScanBase() {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("foo" << 1 << "baz" << 1));
+ }
+
+ virtual ~IndexScanBase() {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
+ }
+
+ void addIndex(const BSONObj& obj) {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ }
+
+ int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) {
+ AutoGetCollectionForRead ctx(&_txn, ns());
+
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
+ verify(swme.isOK());
+ auto_ptr<MatchExpression> filterExpr(swme.getValue());
+
+ WorkingSet* ws = new WorkingSet();
+
+ PlanExecutor* rawExec;
+ Status status = PlanExecutor::make(&_txn,
+ ws,
+ new IndexScan(&_txn, params, ws, filterExpr.get()),
+ ctx.getCollection(),
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ ASSERT_OK(status);
+ boost::scoped_ptr<PlanExecutor> exec(rawExec);
+
+ int count = 0;
+ for (RecordId dl; PlanExecutor::ADVANCED == exec->getNext(NULL, &dl);) {
+ ++count;
}
- void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
- }
-
- int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) {
- AutoGetCollectionForRead ctx(&_txn, ns());
-
- StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
- verify(swme.isOK());
- auto_ptr<MatchExpression> filterExpr(swme.getValue());
-
- WorkingSet* ws = new WorkingSet();
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn,
- ws,
- new IndexScan(&_txn, params, ws, filterExpr.get()),
- ctx.getCollection(),
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- boost::scoped_ptr<PlanExecutor> exec(rawExec);
-
- int count = 0;
- for (RecordId dl; PlanExecutor::ADVANCED == exec->getNext(NULL, &dl); ) {
- ++count;
- }
+ return count;
+ }
- return count;
- }
-
- void makeGeoData() {
- Client::WriteContext ctx(&_txn, ns());
-
- for (int i = 0; i < numObj(); ++i) {
- double lat = double(rand()) / RAND_MAX;
- double lng = double(rand()) / RAND_MAX;
- _client.insert(ns(), BSON("geo" << BSON_ARRAY(lng << lat)));
- }
- }
+ void makeGeoData() {
+ Client::WriteContext ctx(&_txn, ns());
- IndexDescriptor* getIndex(const BSONObj& obj) {
- AutoGetCollectionForRead ctx(&_txn, ns());
- Collection* collection = ctx.getCollection();
- return collection->getIndexCatalog()->findIndexByKeyPattern( &_txn, obj );
+ for (int i = 0; i < numObj(); ++i) {
+ double lat = double(rand()) / RAND_MAX;
+ double lng = double(rand()) / RAND_MAX;
+ _client.insert(ns(), BSON("geo" << BSON_ARRAY(lng << lat)));
}
-
- static int numObj() { return 50; }
- static const char* ns() { return "unittests.IndexScan"; }
-
- protected:
- OperationContextImpl _txn;
-
- private:
- DBDirectClient _client;
- };
-
- class QueryStageIXScanBasic : public IndexScanBase {
- public:
- virtual ~QueryStageIXScanBasic() { }
-
- void run() {
- // foo <= 20
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1));
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSONObj();
- params.bounds.endKeyInclusive = true;
- params.direction = -1;
-
- ASSERT_EQUALS(countResults(params), 21);
- }
- };
-
- class QueryStageIXScanLowerUpper : public IndexScanBase {
- public:
- virtual ~QueryStageIXScanLowerUpper() { }
-
- void run() {
- // 20 <= foo < 30
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1));
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSON("" << 30);
- params.bounds.endKeyInclusive = false;
- params.direction = 1;
-
- ASSERT_EQUALS(countResults(params), 10);
- }
- };
-
- class QueryStageIXScanLowerUpperIncl : public IndexScanBase {
- public:
- virtual ~QueryStageIXScanLowerUpperIncl() { }
-
- void run() {
- // 20 <= foo <= 30
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1));
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSON("" << 30);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
-
- ASSERT_EQUALS(countResults(params), 11);
- }
- };
-
- class QueryStageIXScanLowerUpperInclFilter : public IndexScanBase {
- public:
- virtual ~QueryStageIXScanLowerUpperInclFilter() { }
-
- void run() {
- // 20 <= foo < 30
- // foo == 25
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1));
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSON("" << 30);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
-
- ASSERT_EQUALS(countResults(params, BSON("foo" << 25)), 1);
- }
- };
-
- class QueryStageIXScanCantMatch : public IndexScanBase {
- public:
- virtual ~QueryStageIXScanCantMatch() { }
-
- void run() {
- // 20 <= foo < 30
- // bar == 25 (not covered, should error.)
- IndexScanParams params;
- params.descriptor = getIndex(BSON("foo" << 1));
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = BSON("" << 20);
- params.bounds.endKey = BSON("" << 30);
- params.bounds.endKeyInclusive = true;
- params.direction = 1;
-
- ASSERT_THROWS(countResults(params, BSON("baz" << 25)), MsgAssertionException);
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "query_stage_tests" ) { }
-
- void setupTests() {
- add<QueryStageIXScanBasic>();
- add<QueryStageIXScanLowerUpper>();
- add<QueryStageIXScanLowerUpperIncl>();
- add<QueryStageIXScanLowerUpperInclFilter>();
- add<QueryStageIXScanCantMatch>();
- }
- };
-
- SuiteInstance<All> queryStageTestsAll;
+ }
+
+ IndexDescriptor* getIndex(const BSONObj& obj) {
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
+ return collection->getIndexCatalog()->findIndexByKeyPattern(&_txn, obj);
+ }
+
+ static int numObj() {
+ return 50;
+ }
+ static const char* ns() {
+ return "unittests.IndexScan";
+ }
+
+protected:
+ OperationContextImpl _txn;
+
+private:
+ DBDirectClient _client;
+};
+
+class QueryStageIXScanBasic : public IndexScanBase {
+public:
+ virtual ~QueryStageIXScanBasic() {}
+
+ void run() {
+ // foo <= 20
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1));
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSONObj();
+ params.bounds.endKeyInclusive = true;
+ params.direction = -1;
+
+ ASSERT_EQUALS(countResults(params), 21);
+ }
+};
+
+class QueryStageIXScanLowerUpper : public IndexScanBase {
+public:
+ virtual ~QueryStageIXScanLowerUpper() {}
+
+ void run() {
+ // 20 <= foo < 30
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1));
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSON("" << 30);
+ params.bounds.endKeyInclusive = false;
+ params.direction = 1;
+
+ ASSERT_EQUALS(countResults(params), 10);
+ }
+};
+
+class QueryStageIXScanLowerUpperIncl : public IndexScanBase {
+public:
+ virtual ~QueryStageIXScanLowerUpperIncl() {}
+
+ void run() {
+ // 20 <= foo <= 30
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1));
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSON("" << 30);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+
+ ASSERT_EQUALS(countResults(params), 11);
+ }
+};
+
+class QueryStageIXScanLowerUpperInclFilter : public IndexScanBase {
+public:
+ virtual ~QueryStageIXScanLowerUpperInclFilter() {}
+
+ void run() {
+ // 20 <= foo < 30
+ // foo == 25
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1));
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSON("" << 30);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+
+ ASSERT_EQUALS(countResults(params, BSON("foo" << 25)), 1);
+ }
+};
+
+class QueryStageIXScanCantMatch : public IndexScanBase {
+public:
+ virtual ~QueryStageIXScanCantMatch() {}
+
+ void run() {
+ // 20 <= foo < 30
+ // bar == 25 (not covered, should error.)
+ IndexScanParams params;
+ params.descriptor = getIndex(BSON("foo" << 1));
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = BSON("" << 20);
+ params.bounds.endKey = BSON("" << 30);
+ params.bounds.endKeyInclusive = true;
+ params.direction = 1;
+
+ ASSERT_THROWS(countResults(params, BSON("baz" << 25)), MsgAssertionException);
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("query_stage_tests") {}
+
+ void setupTests() {
+ add<QueryStageIXScanBasic>();
+ add<QueryStageIXScanLowerUpper>();
+ add<QueryStageIXScanLowerUpperIncl>();
+ add<QueryStageIXScanLowerUpperInclFilter>();
+ add<QueryStageIXScanCantMatch>();
+ }
+};
+
+SuiteInstance<All> queryStageTestsAll;
} // namespace
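
The expected counts in these index-scan tests follow directly from the bounds:
with 50 keys foo = 0..49, the interval [0, 20] holds 21 keys, [20, 30) holds
10, and [20, 30] holds 11. A small sketch of that interval arithmetic with an
explicit endKeyInclusive-style flag (illustrative, not the IndexScan code):

#include <cassert>

// Count keys 0..numKeys-1 that fall in [lo, hi] or [lo, hi) per 'hiInclusive'.
int countInRange(int numKeys, int lo, int hi, bool hiInclusive) {
    int count = 0;
    for (int key = 0; key < numKeys; ++key) {
        if (key < lo || key > hi || (key == hi && !hiInclusive))
            continue;
        ++count;
    }
    return count;
}

int main() {
    assert(countInRange(50, 0, 20, true) == 21);    // foo <= 20
    assert(countInRange(50, 20, 30, false) == 10);  // 20 <= foo < 30
    assert(countInRange(50, 20, 30, true) == 11);   // 20 <= foo <= 30
    return 0;
}
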
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 305cd1a1e71..8245ba85304 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -50,320 +50,322 @@
namespace QueryStageUpdate {
- using boost::scoped_ptr;
- using std::auto_ptr;
- using std::vector;
-
- class QueryStageUpdateBase {
- public:
- QueryStageUpdateBase()
- : _client(&_txn),
- _ns("unittests.QueryStageUpdate"),
- _nsString(StringData(ns())) {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
- _client.createCollection(ns());
- }
+using boost::scoped_ptr;
+using std::auto_ptr;
+using std::vector;
+
+class QueryStageUpdateBase {
+public:
+ QueryStageUpdateBase()
+ : _client(&_txn), _ns("unittests.QueryStageUpdate"), _nsString(StringData(ns())) {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
+ _client.createCollection(ns());
+ }
+
+ virtual ~QueryStageUpdateBase() {
+ Client::WriteContext ctx(&_txn, ns());
+ _client.dropCollection(ns());
+ }
+
+ void insert(const BSONObj& doc) {
+ _client.insert(ns(), doc);
+ }
+
+ void remove(const BSONObj& obj) {
+ _client.remove(ns(), obj);
+ }
+
+ size_t count(const BSONObj& query) {
+ return _client.count(ns(), query, 0, 0, 0);
+ }
+
+ CanonicalQuery* canonicalize(const BSONObj& query) {
+ CanonicalQuery* cq;
+ Status status = CanonicalQuery::canonicalize(ns(), query, &cq);
+ ASSERT_OK(status);
+ return cq;
+ }
- virtual ~QueryStageUpdateBase() {
- Client::WriteContext ctx(&_txn, ns());
- _client.dropCollection(ns());
+ /**
+ * Runs the update operation by calling work until EOF. Asserts that
+ * the update stage always returns NEED_TIME.
+ */
+ void runUpdate(UpdateStage* updateStage) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = PlanStage::NEED_TIME;
+ while (PlanStage::IS_EOF != state) {
+ ASSERT_EQUALS(PlanStage::NEED_TIME, state);
+ state = updateStage->work(&id);
}
+ }
- void insert(const BSONObj& doc) {
- _client.insert(ns(), doc);
- }
+ /**
+ * Returns a vector of all of the documents currently in 'collection'.
+ *
+ * Uses a forward collection scan stage to get the docs, and populates 'out' with
+ * the results.
+ */
+ void getCollContents(Collection* collection, vector<BSONObj>* out) {
+ WorkingSet ws;
- void remove(const BSONObj& obj) {
- _client.remove(ns(), obj);
- }
+ CollectionScanParams params;
+ params.collection = collection;
+ params.direction = CollectionScanParams::FORWARD;
+ params.tailable = false;
- size_t count(const BSONObj& query) {
- return _client.count(ns(), query, 0, 0, 0);
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ while (!scan->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ verify(member->hasObj());
+ out->push_back(member->obj.value());
+ }
}
+ }
- CanonicalQuery* canonicalize(const BSONObj& query) {
- CanonicalQuery* cq;
- Status status = CanonicalQuery::canonicalize(ns(), query, &cq);
- ASSERT_OK(status);
- return cq;
- }
+ void getLocs(Collection* collection,
+ CollectionScanParams::Direction direction,
+ vector<RecordId>* out) {
+ WorkingSet ws;
- /**
- * Runs the update operation by calling work until EOF. Asserts that
- * the update stage always returns NEED_TIME.
- */
- void runUpdate(UpdateStage* updateStage) {
+ CollectionScanParams params;
+ params.collection = collection;
+ params.direction = direction;
+ params.tailable = false;
+
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = PlanStage::NEED_TIME;
- while (PlanStage::IS_EOF != state) {
- ASSERT_EQUALS(PlanStage::NEED_TIME, state);
- state = updateStage->work(&id);
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ verify(member->hasLoc());
+ out->push_back(member->loc);
}
}
+ }
- /**
- * Returns a vector of all of the documents currently in 'collection'.
- *
- * Uses a forward collection scan stage to get the docs, and populates 'out' with
- * the results.
- */
- void getCollContents(Collection* collection, vector<BSONObj>* out) {
- WorkingSet ws;
-
- CollectionScanParams params;
- params.collection = collection;
- params.direction = CollectionScanParams::FORWARD;
- params.tailable = false;
-
- scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
- while (!scan->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = scan->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = ws.get(id);
- verify(member->hasObj());
- out->push_back(member->obj.value());
- }
+ /**
+ * Asserts that 'objs' contains 'expectedDoc'.
+ */
+ void assertHasDoc(const vector<BSONObj>& objs, const BSONObj& expectedDoc) {
+ bool foundDoc = false;
+ for (size_t i = 0; i < objs.size(); i++) {
+ if (0 == objs[i].woCompare(expectedDoc)) {
+ foundDoc = true;
+ break;
}
}
+ ASSERT(foundDoc);
+ }
- void getLocs(Collection* collection,
- CollectionScanParams::Direction direction,
- vector<RecordId>* out) {
- WorkingSet ws;
+ const char* ns() {
+ return _ns.c_str();
+ }
- CollectionScanParams params;
- params.collection = collection;
- params.direction = direction;
- params.tailable = false;
+ const NamespaceString& nsString() {
+ return _nsString;
+ }
- scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
- while (!scan->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = scan->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = ws.get(id);
- verify(member->hasLoc());
- out->push_back(member->loc);
- }
- }
- }
+protected:
+ OperationContextImpl _txn;
- /**
- * Asserts that 'objs' contains 'expectedDoc'.
- */
- void assertHasDoc(const vector<BSONObj>& objs, const BSONObj& expectedDoc) {
- bool foundDoc = false;
- for (size_t i = 0; i < objs.size(); i++) {
- if (0 == objs[i].woCompare(expectedDoc)) {
- foundDoc = true;
- break;
- }
- }
- ASSERT(foundDoc);
- }
+private:
+ DBDirectClient _client;
- const char* ns() { return _ns.c_str(); }
+ std::string _ns;
+ NamespaceString _nsString;
+};
- const NamespaceString& nsString() { return _nsString; }
+/**
+ * Test an upsert into an empty collection.
+ */
+class QueryStageUpdateUpsertEmptyColl : public QueryStageUpdateBase {
+public:
+ void run() {
+ // Run the update.
+ {
+ Client::WriteContext ctx(&_txn, ns());
+ Client& c = cc();
+ CurOp& curOp = *c.curop();
+ OpDebug* opDebug = &curOp.debug();
+ UpdateDriver driver((UpdateDriver::Options()));
+ Collection* collection = ctx.getCollection();
- protected:
- OperationContextImpl _txn;
+ // Collection should be empty.
+ ASSERT_EQUALS(0U, count(BSONObj()));
- private:
- DBDirectClient _client;
+ UpdateRequest request(nsString());
+ UpdateLifecycleImpl updateLifecycle(false, nsString());
+ request.setLifecycle(&updateLifecycle);
- std::string _ns;
- NamespaceString _nsString;
- };
+ // Update is the upsert {_id: 0, x: 1}, {$set: {y: 2}}.
+ BSONObj query = fromjson("{_id: 0, x: 1}");
+ BSONObj updates = fromjson("{$set: {y: 2}}");
- /**
- * Test an upsert into an empty collection.
- */
- class QueryStageUpdateUpsertEmptyColl : public QueryStageUpdateBase {
- public:
- void run() {
- // Run the update.
- {
- Client::WriteContext ctx(&_txn, ns());
- Client& c = cc();
- CurOp& curOp = *c.curop();
- OpDebug* opDebug = &curOp.debug();
- UpdateDriver driver( (UpdateDriver::Options()) );
- Collection* collection = ctx.getCollection();
-
- // Collection should be empty.
- ASSERT_EQUALS(0U, count(BSONObj()));
-
- UpdateRequest request(nsString());
- UpdateLifecycleImpl updateLifecycle(false, nsString());
- request.setLifecycle(&updateLifecycle);
-
- // Update is the upsert {_id: 0, x: 1}, {$set: {y: 2}}.
- BSONObj query = fromjson("{_id: 0, x: 1}");
- BSONObj updates = fromjson("{$set: {y: 2}}");
-
- request.setUpsert();
- request.setQuery(query);
- request.setUpdates(updates);
-
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
-
- // Setup update params.
- UpdateStageParams params(&request, &driver, opDebug);
- scoped_ptr<CanonicalQuery> cq(canonicalize(query));
- params.canonicalQuery = cq.get();
-
- scoped_ptr<WorkingSet> ws(new WorkingSet());
- auto_ptr<EOFStage> eofStage(new EOFStage());
-
- scoped_ptr<UpdateStage> updateStage(
- new UpdateStage(&_txn, params, ws.get(), collection, eofStage.release()));
-
- runUpdate(updateStage.get());
- }
+ request.setUpsert();
+ request.setQuery(query);
+ request.setUpdates(updates);
- // Verify the contents of the resulting collection.
- {
- AutoGetCollectionForRead ctx(&_txn, ns());
- Collection* collection = ctx.getCollection();
+ ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
- vector<BSONObj> objs;
- getCollContents(collection, &objs);
+ // Setup update params.
+ UpdateStageParams params(&request, &driver, opDebug);
+ scoped_ptr<CanonicalQuery> cq(canonicalize(query));
+ params.canonicalQuery = cq.get();
- // Expect a single document, {_id: 0, x: 1, y: 2}.
- ASSERT_EQUALS(1U, objs.size());
- ASSERT_EQUALS(objs[0], fromjson("{_id: 0, x: 1, y: 2}"));
- }
+ scoped_ptr<WorkingSet> ws(new WorkingSet());
+ auto_ptr<EOFStage> eofStage(new EOFStage());
+
+ scoped_ptr<UpdateStage> updateStage(
+ new UpdateStage(&_txn, params, ws.get(), collection, eofStage.release()));
+
+ runUpdate(updateStage.get());
}
- };
- /**
- * Test receipt of an invalidation: case in which the document about to updated
- * is deleted.
- */
- class QueryStageUpdateSkipInvalidatedDoc : public QueryStageUpdateBase {
- public:
- void run() {
- // Run the update.
- {
- Client::WriteContext ctx(&_txn, ns());
-
- // Populate the collection.
- for (int i = 0; i < 10; ++i) {
- insert(BSON("_id" << i << "foo" << i));
- }
- ASSERT_EQUALS(10U, count(BSONObj()));
-
- Client& c = cc();
- CurOp& curOp = *c.curop();
- OpDebug* opDebug = &curOp.debug();
- UpdateDriver driver( (UpdateDriver::Options()) );
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
-
- // Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> locs;
- getLocs(coll, CollectionScanParams::FORWARD, &locs);
-
- UpdateRequest request(nsString());
- UpdateLifecycleImpl updateLifecycle(false, nsString());
- request.setLifecycle(&updateLifecycle);
-
- // Update is a multi-update that sets 'bar' to 3 in every document
- // where foo is less than 5.
- BSONObj query = fromjson("{foo: {$lt: 5}}");
- BSONObj updates = fromjson("{$set: {bar: 3}}");
-
- request.setMulti();
- request.setQuery(query);
- request.setUpdates(updates);
-
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
-
- // Configure the scan.
- CollectionScanParams collScanParams;
- collScanParams.collection = coll;
- collScanParams.direction = CollectionScanParams::FORWARD;
- collScanParams.tailable = false;
-
- // Configure the update.
- UpdateStageParams updateParams(&request, &driver, opDebug);
- scoped_ptr<CanonicalQuery> cq(canonicalize(query));
- updateParams.canonicalQuery = cq.get();
-
- scoped_ptr<WorkingSet> ws(new WorkingSet());
- auto_ptr<CollectionScan> cs(
- new CollectionScan(&_txn, collScanParams, ws.get(), cq->root()));
-
- scoped_ptr<UpdateStage> updateStage(
- new UpdateStage(&_txn, updateParams, ws.get(), coll, cs.release()));
-
- const UpdateStats* stats =
- static_cast<const UpdateStats*>(updateStage->getSpecificStats());
-
- const size_t targetDocIndex = 3;
-
- while (stats->nModified < targetDocIndex) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = updateStage->work(&id);
- ASSERT_EQUALS(PlanStage::NEED_TIME, state);
- }
-
- // Remove the document at locs[targetDocIndex].
- updateStage->saveState();
- updateStage->invalidate(&_txn, locs[targetDocIndex], INVALIDATION_DELETION);
- BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]).value();
- ASSERT(!targetDoc.isEmpty());
- remove(targetDoc);
- updateStage->restoreState(&_txn);
-
- // Do the remaining updates.
- while (!updateStage->isEOF()) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = updateStage->work(&id);
- ASSERT(PlanStage::NEED_TIME == state || PlanStage::IS_EOF == state);
- }
-
- // 4 of the 5 matching documents should have been modified (one was deleted).
- ASSERT_EQUALS(4U, stats->nModified);
- ASSERT_EQUALS(4U, stats->nMatched);
+ // Verify the contents of the resulting collection.
+ {
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
+
+ vector<BSONObj> objs;
+ getCollContents(collection, &objs);
+
+ // Expect a single document, {_id: 0, x: 1, y: 2}.
+ ASSERT_EQUALS(1U, objs.size());
+ ASSERT_EQUALS(objs[0], fromjson("{_id: 0, x: 1, y: 2}"));
+ }
+ }
+};
+
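For context, the stage wiring above is the internal equivalent of a one-line driver upsert. A minimal sketch, assuming a throwaway namespace and an OperationContext 'txn', using the same DBDirectClient::update signature that appears later in this diff:

    // Sketch only: the same upsert via DBDirectClient (namespace assumed).
    // The query's equality fields {_id: 0, x: 1} seed the new document,
    // after which {$set: {y: 2}} is applied to it.
    DBDirectClient client(&txn);
    client.update("unittests.example.upsert",
                  Query(fromjson("{_id: 0, x: 1}")),
                  fromjson("{$set: {y: 2}}"),
                  true /* upsert */);
    // The collection then holds one document: {_id: 0, x: 1, y: 2}.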
+/**
+ * Test receipt of an invalidation: the case in which the document about to be updated
+ * is deleted.
+ */
+class QueryStageUpdateSkipInvalidatedDoc : public QueryStageUpdateBase {
+public:
+ void run() {
+ // Run the update.
+ {
+ Client::WriteContext ctx(&_txn, ns());
+
+ // Populate the collection.
+ for (int i = 0; i < 10; ++i) {
+ insert(BSON("_id" << i << "foo" << i));
}
+ ASSERT_EQUALS(10U, count(BSONObj()));
+
+ Client& c = cc();
+ CurOp& curOp = *c.curop();
+ OpDebug* opDebug = &curOp.debug();
+ UpdateDriver driver((UpdateDriver::Options()));
+ Database* db = ctx.ctx().db();
+ Collection* coll = db->getCollection(ns());
+
+ // Get the RecordIds that would be returned by an in-order scan.
+ vector<RecordId> locs;
+ getLocs(coll, CollectionScanParams::FORWARD, &locs);
+
+ UpdateRequest request(nsString());
+ UpdateLifecycleImpl updateLifecycle(false, nsString());
+ request.setLifecycle(&updateLifecycle);
+
+ // Update is a multi-update that sets 'bar' to 3 in every document
+ // where foo is less than 5.
+ BSONObj query = fromjson("{foo: {$lt: 5}}");
+ BSONObj updates = fromjson("{$set: {bar: 3}}");
+
+ request.setMulti();
+ request.setQuery(query);
+ request.setUpdates(updates);
- // Check the contents of the collection.
- {
- AutoGetCollectionForRead ctx(&_txn, ns());
- Collection* collection = ctx.getCollection();
+ ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
- vector<BSONObj> objs;
- getCollContents(collection, &objs);
+ // Configure the scan.
+ CollectionScanParams collScanParams;
+ collScanParams.collection = coll;
+ collScanParams.direction = CollectionScanParams::FORWARD;
+ collScanParams.tailable = false;
- // Verify that the collection now has 9 docs (one was deleted).
- ASSERT_EQUALS(9U, objs.size());
+ // Configure the update.
+ UpdateStageParams updateParams(&request, &driver, opDebug);
+ scoped_ptr<CanonicalQuery> cq(canonicalize(query));
+ updateParams.canonicalQuery = cq.get();
- // Make sure that the collection has certain documents.
- assertHasDoc(objs, fromjson("{_id: 0, foo: 0, bar: 3}"));
- assertHasDoc(objs, fromjson("{_id: 1, foo: 1, bar: 3}"));
- assertHasDoc(objs, fromjson("{_id: 2, foo: 2, bar: 3}"));
- assertHasDoc(objs, fromjson("{_id: 4, foo: 4, bar: 3}"));
- assertHasDoc(objs, fromjson("{_id: 5, foo: 5}"));
- assertHasDoc(objs, fromjson("{_id: 6, foo: 6}"));
+ scoped_ptr<WorkingSet> ws(new WorkingSet());
+ auto_ptr<CollectionScan> cs(
+ new CollectionScan(&_txn, collScanParams, ws.get(), cq->root()));
+
+ scoped_ptr<UpdateStage> updateStage(
+ new UpdateStage(&_txn, updateParams, ws.get(), coll, cs.release()));
+
+ const UpdateStats* stats =
+ static_cast<const UpdateStats*>(updateStage->getSpecificStats());
+
+ const size_t targetDocIndex = 3;
+
+ while (stats->nModified < targetDocIndex) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = updateStage->work(&id);
+ ASSERT_EQUALS(PlanStage::NEED_TIME, state);
}
+
+ // Remove the document at locs[targetDocIndex].
+ updateStage->saveState();
+ updateStage->invalidate(&_txn, locs[targetDocIndex], INVALIDATION_DELETION);
+ BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]).value();
+ ASSERT(!targetDoc.isEmpty());
+ remove(targetDoc);
+ updateStage->restoreState(&_txn);
+
+ // Do the remaining updates.
+ while (!updateStage->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = updateStage->work(&id);
+ ASSERT(PlanStage::NEED_TIME == state || PlanStage::IS_EOF == state);
+ }
+
+ // 4 of the 5 matching documents should have been modified (one was deleted).
+ ASSERT_EQUALS(4U, stats->nModified);
+ ASSERT_EQUALS(4U, stats->nMatched);
}
- };
- class All : public Suite {
- public:
- All() : Suite("query_stage_update") {}
+ // Check the contents of the collection.
+ {
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
+
+ vector<BSONObj> objs;
+ getCollContents(collection, &objs);
- void setupTests() {
- // Stage-specific tests below.
- add<QueryStageUpdateUpsertEmptyColl>();
- add<QueryStageUpdateSkipInvalidatedDoc>();
+ // Verify that the collection now has 9 docs (one was deleted).
+ ASSERT_EQUALS(9U, objs.size());
+
+ // Make sure that the collection has certain documents.
+ assertHasDoc(objs, fromjson("{_id: 0, foo: 0, bar: 3}"));
+ assertHasDoc(objs, fromjson("{_id: 1, foo: 1, bar: 3}"));
+ assertHasDoc(objs, fromjson("{_id: 2, foo: 2, bar: 3}"));
+ assertHasDoc(objs, fromjson("{_id: 4, foo: 4, bar: 3}"));
+ assertHasDoc(objs, fromjson("{_id: 5, foo: 5}"));
+ assertHasDoc(objs, fromjson("{_id: 6, foo: 6}"));
}
- };
+ }
+};
+
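The save/invalidate/restore sequence in the test above follows the general PlanStage contract: a stage must be detached from storage before the underlying document is removed, and notified so it does not resume on a stale RecordId. A minimal sketch, assuming a PlanStage* 'stage' and a RecordId 'rid' about to be deleted:

    // Sketch of the invalidation contract exercised above ('stage' and 'rid'
    // are hypothetical; the calls mirror those used in the test).
    stage->saveState();                                   // detach from storage
    stage->invalidate(&txn, rid, INVALIDATION_DELETION);  // drop references to 'rid'
    // ... the document at 'rid' is deleted here ...
    stage->restoreState(&txn);                            // reattach and continue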
+class All : public Suite {
+public:
+ All() : Suite("query_stage_update") {}
+
+ void setupTests() {
+ // Stage-specific tests below.
+ add<QueryStageUpdateUpsertEmptyColl>();
+ add<QueryStageUpdateSkipInvalidatedDoc>();
+ }
+};
- SuiteInstance<All> all;
+SuiteInstance<All> all;
-} // namespace QueryStageUpdate
+} // namespace QueryStageUpdate
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index b2b79021ad7..b53f7166218 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -49,1644 +49,1763 @@
#include "mongo/util/timer.h"
namespace mongo {
- void assembleRequest( const std::string &ns, BSONObj query, int nToReturn, int nToSkip,
- const BSONObj *fieldsToReturn, int queryOptions, Message &toSend );
+void assembleRequest(const std::string& ns,
+ BSONObj query,
+ int nToReturn,
+ int nToSkip,
+ const BSONObj* fieldsToReturn,
+ int queryOptions,
+ Message& toSend);
}
namespace QueryTests {
- using std::auto_ptr;
- using std::cout;
- using std::endl;
- using std::string;
- using std::vector;
-
- class Base {
- public:
- Base() : _scopedXact(&_txn, MODE_X),
- _lk(_txn.lockState()),
- _context(&_txn, ns()) {
-
- {
- WriteUnitOfWork wunit(&_txn);
- _database = _context.db();
- _collection = _database->getCollection( ns() );
- if ( _collection ) {
- _database->dropCollection( &_txn, ns() );
- }
- _collection = _database->createCollection( &_txn, ns() );
- wunit.commit();
- }
-
- addIndex( fromjson( "{\"a\":1}" ) );
- }
+using std::auto_ptr;
+using std::cout;
+using std::endl;
+using std::string;
+using std::vector;
- ~Base() {
- try {
- WriteUnitOfWork wunit(&_txn);
- uassertStatusOK( _database->dropCollection( &_txn, ns() ) );
- wunit.commit();
- }
- catch ( ... ) {
- FAIL( "Exception while cleaning up collection" );
+class Base {
+public:
+ Base() : _scopedXact(&_txn, MODE_X), _lk(_txn.lockState()), _context(&_txn, ns()) {
+ {
+ WriteUnitOfWork wunit(&_txn);
+ _database = _context.db();
+ _collection = _database->getCollection(ns());
+ if (_collection) {
+ _database->dropCollection(&_txn, ns());
}
+ _collection = _database->createCollection(&_txn, ns());
+ wunit.commit();
}
- protected:
- static const char *ns() {
- return "unittests.querytests";
- }
-
- void addIndex( const BSONObj &key ) {
- Helpers::ensureIndex(&_txn, _collection, key, false, key.firstElementFieldName());
- }
-
- void insert( const char *s ) {
- insert( fromjson( s ) );
- }
+ addIndex(fromjson("{\"a\":1}"));
+ }
- void insert( const BSONObj &o ) {
+ ~Base() {
+ try {
WriteUnitOfWork wunit(&_txn);
- if ( o["_id"].eoo() ) {
- BSONObjBuilder b;
- OID oid;
- oid.init();
- b.appendOID( "_id", &oid );
- b.appendElements( o );
- _collection->insertDocument( &_txn, b.obj(), false );
- }
- else {
- _collection->insertDocument( &_txn, o, false );
- }
+ uassertStatusOK(_database->dropCollection(&_txn, ns()));
wunit.commit();
+ } catch (...) {
+ FAIL("Exception while cleaning up collection");
}
+ }
+protected:
+ static const char* ns() {
+ return "unittests.querytests";
+ }
- OperationContextImpl _txn;
- ScopedTransaction _scopedXact;
- Lock::GlobalWrite _lk;
- Client::Context _context;
-
- Database* _database;
- Collection* _collection;
- };
-
- class FindOneOr : public Base {
- public:
- void run() {
- addIndex( BSON( "b" << 1 ) );
- addIndex( BSON( "c" << 1 ) );
- insert( BSON( "b" << 2 << "_id" << 0 ) );
- insert( BSON( "c" << 3 << "_id" << 1 ) );
- BSONObj query = fromjson( "{$or:[{b:2},{c:3}]}" );
- BSONObj ret;
- // Check findOne() returning object.
- ASSERT( Helpers::findOne( &_txn, _collection, query, ret, true ) );
- ASSERT_EQUALS( string( "b" ), ret.firstElement().fieldName() );
- // Cross check with findOne() returning location.
- ASSERT_EQUALS(ret,
- _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
- }
- };
-
- class FindOneRequireIndex : public Base {
- public:
- void run() {
- insert( BSON( "b" << 2 << "_id" << 0 ) );
- BSONObj query = fromjson( "{b:2}" );
- BSONObj ret;
-
- // Check findOne() returning object, allowing unindexed scan.
- ASSERT( Helpers::findOne( &_txn, _collection, query, ret, false ) );
- // Check findOne() returning location, allowing unindexed scan.
- ASSERT_EQUALS(ret,
- _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
-
- // Check findOne() returning object, requiring indexed scan without index.
- ASSERT_THROWS( Helpers::findOne( &_txn, _collection, query, ret, true ), MsgAssertionException );
- // Check findOne() returning location, requiring indexed scan without index.
- ASSERT_THROWS( Helpers::findOne( &_txn, _collection, query, true ), MsgAssertionException );
-
- addIndex( BSON( "b" << 1 ) );
- // Check findOne() returning object, requiring indexed scan with index.
- ASSERT( Helpers::findOne( &_txn, _collection, query, ret, true ) );
- // Check findOne() returning location, requiring indexed scan with index.
- ASSERT_EQUALS(ret,
- _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
- }
- };
-
- class FindOneEmptyObj : public Base {
- public:
- void run() {
- // We don't normally allow empty objects in the database, but test that we can find
- // an empty object (one might be allowed inside a reserved namespace at some point).
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- Client::Context ctx(&_txn, "unittests.querytests" );
-
- {
- WriteUnitOfWork wunit(&_txn);
- Database* db = ctx.db();
- if ( db->getCollection( ns() ) ) {
- _collection = NULL;
- db->dropCollection( &_txn, ns() );
- }
- _collection = db->createCollection( &_txn, ns(), CollectionOptions(), true, false );
- wunit.commit();
- }
- ASSERT( _collection );
-
- DBDirectClient cl(&_txn);
- BSONObj info;
- bool ok = cl.runCommand( "unittests", BSON( "godinsert" << "querytests" << "obj" << BSONObj() ), info );
- ASSERT( ok );
-
- insert( BSONObj() );
- BSONObj query;
- BSONObj ret;
- ASSERT( Helpers::findOne( &_txn, _collection, query, ret, false ) );
- ASSERT( ret.isEmpty() );
- ASSERT_EQUALS(ret,
- _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
- }
- };
-
- class ClientBase {
- public:
- ClientBase() : _client(&_txn) {
- _prevError = mongo::lastError._get( false );
- mongo::lastError.release();
- mongo::lastError.reset( new LastError() );
- _txn.getCurOp()->reset();
- }
- virtual ~ClientBase() {
- mongo::lastError.reset( _prevError );
- }
-
- protected:
- void insert( const char *ns, BSONObj o ) {
- _client.insert( ns, o );
- }
- void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
- _client.update( ns, Query( q ), o, upsert );
- }
- bool error() {
- return !_client.getPrevError().getField( "err" ).isNull();
- }
+ void addIndex(const BSONObj& key) {
+ Helpers::ensureIndex(&_txn, _collection, key, false, key.firstElementFieldName());
+ }
- OperationContextImpl _txn;
- DBDirectClient _client;
+ void insert(const char* s) {
+ insert(fromjson(s));
+ }
- private:
- LastError* _prevError;
- };
+ void insert(const BSONObj& o) {
+ WriteUnitOfWork wunit(&_txn);
+ if (o["_id"].eoo()) {
+ BSONObjBuilder b;
+ OID oid;
+ oid.init();
+ b.appendOID("_id", &oid);
+ b.appendElements(o);
+ _collection->insertDocument(&_txn, b.obj(), false);
+ } else {
+ _collection->insertDocument(&_txn, o, false);
+ }
+ wunit.commit();
+ }
- class BoundedKey : public ClientBase {
- public:
- ~BoundedKey() {
- _client.dropCollection( "unittests.querytests.BoundedKey" );
- }
- void run() {
- const char *ns = "unittests.querytests.BoundedKey";
- insert( ns, BSON( "a" << 1 ) );
- BSONObjBuilder a;
- a.appendMaxKey( "$lt" );
- BSONObj limit = a.done();
- ASSERT( !_client.findOne( ns, QUERY( "a" << limit ) ).isEmpty() );
- ASSERT_OK(dbtests::createIndex( &_txn, ns, BSON( "a" << 1 ) ));
- ASSERT( !_client.findOne( ns, QUERY( "a" << limit ).hint( BSON( "a" << 1 ) ) ).isEmpty() );
- }
- };
- class GetMore : public ClientBase {
- public:
- ~GetMore() {
- _client.dropCollection( "unittests.querytests.GetMore" );
- }
- void run() {
- const char *ns = "unittests.querytests.GetMore";
- insert( ns, BSON( "a" << 1 ) );
- insert( ns, BSON( "a" << 2 ) );
- insert( ns, BSON( "a" << 3 ) );
- auto_ptr< DBClientCursor > cursor = _client.query( ns, BSONObj(), 2 );
- long long cursorId = cursor->getCursorId();
- cursor->decouple();
- cursor.reset();
-
- {
- // Check internal server handoff to getmore.
- Client::WriteContext ctx(&_txn, ns);
- ClientCursorPin clientCursor( ctx.getCollection()->getCursorManager(), cursorId );
- // pq doesn't exist if it's a runner inside of the clientcursor.
- // ASSERT( clientCursor.c()->pq );
- // ASSERT_EQUALS( 2, clientCursor.c()->pq->getNumToReturn() );
- ASSERT_EQUALS( 2, clientCursor.c()->pos() );
+ OperationContextImpl _txn;
+ ScopedTransaction _scopedXact;
+ Lock::GlobalWrite _lk;
+ Client::Context _context;
+
+ Database* _database;
+ Collection* _collection;
+};
+
+class FindOneOr : public Base {
+public:
+ void run() {
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ insert(BSON("b" << 2 << "_id" << 0));
+ insert(BSON("c" << 3 << "_id" << 1));
+ BSONObj query = fromjson("{$or:[{b:2},{c:3}]}");
+ BSONObj ret;
+ // Check findOne() returning object.
+ ASSERT(Helpers::findOne(&_txn, _collection, query, ret, true));
+ ASSERT_EQUALS(string("b"), ret.firstElement().fieldName());
+ // Cross check with findOne() returning location.
+ ASSERT_EQUALS(
+ ret,
+ _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
+ }
+};
+
+class FindOneRequireIndex : public Base {
+public:
+ void run() {
+ insert(BSON("b" << 2 << "_id" << 0));
+ BSONObj query = fromjson("{b:2}");
+ BSONObj ret;
+
+ // Check findOne() returning object, allowing unindexed scan.
+ ASSERT(Helpers::findOne(&_txn, _collection, query, ret, false));
+ // Check findOne() returning location, allowing unindexed scan.
+ ASSERT_EQUALS(
+ ret,
+ _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
+
+ // Check findOne() returning object, requiring indexed scan without index.
+ ASSERT_THROWS(Helpers::findOne(&_txn, _collection, query, ret, true),
+ MsgAssertionException);
+ // Check findOne() returning location, requiring indexed scan without index.
+ ASSERT_THROWS(Helpers::findOne(&_txn, _collection, query, true), MsgAssertionException);
+
+ addIndex(BSON("b" << 1));
+ // Check findOne() returning object, requiring indexed scan with index.
+ ASSERT(Helpers::findOne(&_txn, _collection, query, ret, true));
+ // Check findOne() returning location, requiring indexed scan with index.
+ ASSERT_EQUALS(
+ ret,
+ _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
+ }
+};
+
+class FindOneEmptyObj : public Base {
+public:
+ void run() {
+ // We don't normally allow empty objects in the database, but test that we can find
+ // an empty object (one might be allowed inside a reserved namespace at some point).
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::GlobalWrite lk(_txn.lockState());
+ Client::Context ctx(&_txn, "unittests.querytests");
+
+ {
+ WriteUnitOfWork wunit(&_txn);
+ Database* db = ctx.db();
+ if (db->getCollection(ns())) {
+ _collection = NULL;
+ db->dropCollection(&_txn, ns());
}
-
- cursor = _client.getMore( ns, cursorId );
- ASSERT( cursor->more() );
- ASSERT_EQUALS( 3, cursor->next().getIntField( "a" ) );
- }
- };
-
- /**
- * An exception triggered during a get more request destroys the ClientCursor used by the get
- * more, preventing further iteration of the cursor in subsequent get mores.
- */
- class GetMoreKillOp : public ClientBase {
- public:
- ~GetMoreKillOp() {
- getGlobalEnvironment()->unsetKillAllOperations();
- _client.dropCollection( "unittests.querytests.GetMoreKillOp" );
+ _collection = db->createCollection(&_txn, ns(), CollectionOptions(), true, false);
+ wunit.commit();
}
- void run() {
-
- // Create a collection with some data.
- const char* ns = "unittests.querytests.GetMoreKillOp";
- for( int i = 0; i < 1000; ++i ) {
- insert( ns, BSON( "a" << i ) );
- }
-
- // Create a cursor on the collection, with a batch size of 200.
- auto_ptr<DBClientCursor> cursor = _client.query( ns, "", 0, 0, 0, 0, 200 );
- CursorId cursorId = cursor->getCursorId();
-
- // Count 500 results, spanning a few batches of documents.
- for( int i = 0; i < 500; ++i ) {
- ASSERT( cursor->more() );
- cursor->next();
- }
-
- // Set the killop kill all flag, forcing the next get more to fail with a kill op
- // exception.
- getGlobalEnvironment()->setKillAllOperations();
- while( cursor->more() ) {
- cursor->next();
- }
-
- // Revert the killop kill all flag.
- getGlobalEnvironment()->unsetKillAllOperations();
-
- // Check that the cursor has been removed.
- {
- AutoGetCollectionForRead ctx(&_txn, ns);
- ASSERT(0 == ctx.getCollection()->getCursorManager()->numCursors());
- }
+ ASSERT(_collection);
+
+ DBDirectClient cl(&_txn);
+ BSONObj info;
+ bool ok = cl.runCommand("unittests",
+ BSON("godinsert"
+ << "querytests"
+ << "obj" << BSONObj()),
+ info);
+ ASSERT(ok);
+
+ insert(BSONObj());
+ BSONObj query;
+ BSONObj ret;
+ ASSERT(Helpers::findOne(&_txn, _collection, query, ret, false));
+ ASSERT(ret.isEmpty());
+ ASSERT_EQUALS(
+ ret,
+ _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
+ }
+};
+
+class ClientBase {
+public:
+ ClientBase() : _client(&_txn) {
+ _prevError = mongo::lastError._get(false);
+ mongo::lastError.release();
+ mongo::lastError.reset(new LastError());
+ _txn.getCurOp()->reset();
+ }
+ virtual ~ClientBase() {
+ mongo::lastError.reset(_prevError);
+ }
- ASSERT_FALSE(CursorManager::eraseCursorGlobal(&_txn, cursorId));
+protected:
+ void insert(const char* ns, BSONObj o) {
+ _client.insert(ns, o);
+ }
+ void update(const char* ns, BSONObj q, BSONObj o, bool upsert = 0) {
+ _client.update(ns, Query(q), o, upsert);
+ }
+ bool error() {
+ return !_client.getPrevError().getField("err").isNull();
+ }
- // Check that a subsequent get more fails with the cursor removed.
- ASSERT_THROWS( _client.getMore( ns, cursorId ), UserException );
- }
- };
-
- /**
- * A get more exception caused by an invalid or unauthorized get more request does not cause
- * the get more's ClientCursor to be destroyed. This prevents an unauthorized user from
- * improperly killing a cursor by issuing an invalid get more request.
- */
- class GetMoreInvalidRequest : public ClientBase {
- public:
- ~GetMoreInvalidRequest() {
- getGlobalEnvironment()->unsetKillAllOperations();
- _client.dropCollection( "unittests.querytests.GetMoreInvalidRequest" );
- }
- void run() {
+ OperationContextImpl _txn;
+ DBDirectClient _client;
- // Create a collection with some data.
- const char* ns = "unittests.querytests.GetMoreInvalidRequest";
- for( int i = 0; i < 1000; ++i ) {
- insert( ns, BSON( "a" << i ) );
- }
-
- // Create a cursor on the collection, with a batch size of 200.
- auto_ptr<DBClientCursor> cursor = _client.query( ns, "", 0, 0, 0, 0, 200 );
- CursorId cursorId = cursor->getCursorId();
-
- // Count 500 results, spanning a few batches of documents.
- int count = 0;
- for( int i = 0; i < 500; ++i ) {
- ASSERT( cursor->more() );
- cursor->next();
- ++count;
- }
+private:
+ LastError* _prevError;
+};
- // Send a get more with a namespace that is incorrect ('spoofed') for this cursor id.
- // This is the invalid get more request described in the comment preceding this class.
- _client.getMore
- ( "unittests.querytests.GetMoreInvalidRequest_WRONG_NAMESPACE_FOR_CURSOR",
- cursor->getCursorId() );
-
- // Check that the cursor still exists
- {
- AutoGetCollectionForRead ctx(&_txn, ns);
- ASSERT(1 == ctx.getCollection()->getCursorManager()->numCursors());
- ASSERT(ctx.getCollection()->getCursorManager()->find(cursorId, false));
- }
+class BoundedKey : public ClientBase {
+public:
+ ~BoundedKey() {
+ _client.dropCollection("unittests.querytests.BoundedKey");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.BoundedKey";
+ insert(ns, BSON("a" << 1));
+ BSONObjBuilder a;
+ a.appendMaxKey("$lt");
+ BSONObj limit = a.done();
+ ASSERT(!_client.findOne(ns, QUERY("a" << limit)).isEmpty());
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT(!_client.findOne(ns, QUERY("a" << limit).hint(BSON("a" << 1))).isEmpty());
+ }
+};
- // Check that the cursor can be iterated until all documents are returned.
- while( cursor->more() ) {
- cursor->next();
- ++count;
- }
- ASSERT_EQUALS( 1000, count );
- }
- };
-
- class PositiveLimit : public ClientBase {
- public:
- const char* ns;
- PositiveLimit() : ns("unittests.querytests.PositiveLimit") {}
- ~PositiveLimit() {
- _client.dropCollection( ns );
- }
+class GetMore : public ClientBase {
+public:
+ ~GetMore() {
+ _client.dropCollection("unittests.querytests.GetMore");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.GetMore";
+ insert(ns, BSON("a" << 1));
+ insert(ns, BSON("a" << 2));
+ insert(ns, BSON("a" << 3));
+ auto_ptr<DBClientCursor> cursor = _client.query(ns, BSONObj(), 2);
+ long long cursorId = cursor->getCursorId();
+ cursor->decouple();
+ cursor.reset();
+
+ {
+ // Check internal server handoff to getmore.
+ Client::WriteContext ctx(&_txn, ns);
+ ClientCursorPin clientCursor(ctx.getCollection()->getCursorManager(), cursorId);
+ // pq doesn't exist if it's a runner inside of the clientcursor.
+ // ASSERT( clientCursor.c()->pq );
+ // ASSERT_EQUALS( 2, clientCursor.c()->pq->getNumToReturn() );
+ ASSERT_EQUALS(2, clientCursor.c()->pos());
+ }
+
+ cursor = _client.getMore(ns, cursorId);
+ ASSERT(cursor->more());
+ ASSERT_EQUALS(3, cursor->next().getIntField("a"));
+ }
+};
- void testLimit(int limit) {
- ASSERT_EQUALS(_client.query( ns, BSONObj(), limit )->itcount(), limit);
- }
- void run() {
- for(int i=0; i<1000; i++)
- insert( ns, BSON( GENOID << "i" << i ) );
-
- ASSERT_EQUALS( _client.query(ns, BSONObj(), 1 )->itcount(), 1);
- ASSERT_EQUALS( _client.query(ns, BSONObj(), 10 )->itcount(), 10);
- ASSERT_EQUALS( _client.query(ns, BSONObj(), 101 )->itcount(), 101);
- ASSERT_EQUALS( _client.query(ns, BSONObj(), 999 )->itcount(), 999);
- ASSERT_EQUALS( _client.query(ns, BSONObj(), 1000 )->itcount(), 1000);
- ASSERT_EQUALS( _client.query(ns, BSONObj(), 1001 )->itcount(), 1000);
- ASSERT_EQUALS( _client.query(ns, BSONObj(), 0 )->itcount(), 1000);
+/**
+ * An exception triggered during a get more request destroys the ClientCursor used by the get
+ * more, preventing further iteration of the cursor in subsequent get mores.
+ */
+class GetMoreKillOp : public ClientBase {
+public:
+ ~GetMoreKillOp() {
+ getGlobalEnvironment()->unsetKillAllOperations();
+ _client.dropCollection("unittests.querytests.GetMoreKillOp");
+ }
+ void run() {
+ // Create a collection with some data.
+ const char* ns = "unittests.querytests.GetMoreKillOp";
+ for (int i = 0; i < 1000; ++i) {
+ insert(ns, BSON("a" << i));
}
- };
- class ReturnOneOfManyAndTail : public ClientBase {
- public:
- ~ReturnOneOfManyAndTail() {
- _client.dropCollection( "unittests.querytests.ReturnOneOfManyAndTail" );
- }
- void run() {
- const char *ns = "unittests.querytests.ReturnOneOfManyAndTail";
- _client.createCollection( ns, 1024, true );
- insert( ns, BSON( "a" << 0 ) );
- insert( ns, BSON( "a" << 1 ) );
- insert( ns, BSON( "a" << 2 ) );
- auto_ptr< DBClientCursor > c = _client.query( ns, QUERY( "a" << GT << 0 ).hint( BSON( "$natural" << 1 ) ), 1, 0, 0, QueryOption_CursorTailable );
- // If only one result is requested, a cursor is not saved.
- ASSERT_EQUALS( 0, c->getCursorId() );
- ASSERT( c->more() );
- ASSERT_EQUALS( 1, c->next().getIntField( "a" ) );
- }
- };
+ // Create a cursor on the collection, with a batch size of 200.
+ auto_ptr<DBClientCursor> cursor = _client.query(ns, "", 0, 0, 0, 0, 200);
+ CursorId cursorId = cursor->getCursorId();
- class TailNotAtEnd : public ClientBase {
- public:
- ~TailNotAtEnd() {
- _client.dropCollection( "unittests.querytests.TailNotAtEnd" );
- }
- void run() {
- const char *ns = "unittests.querytests.TailNotAtEnd";
- _client.createCollection( ns, 2047, true );
- insert( ns, BSON( "a" << 0 ) );
- insert( ns, BSON( "a" << 1 ) );
- insert( ns, BSON( "a" << 2 ) );
- auto_ptr< DBClientCursor > c = _client.query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
- ASSERT( 0 != c->getCursorId() );
- while( c->more() )
- c->next();
- ASSERT( 0 != c->getCursorId() );
- insert( ns, BSON( "a" << 3 ) );
- insert( ns, BSON( "a" << 4 ) );
- insert( ns, BSON( "a" << 5 ) );
- insert( ns, BSON( "a" << 6 ) );
- ASSERT( c->more() );
- ASSERT_EQUALS( 3, c->next().getIntField( "a" ) );
+ // Count 500 results, spanning a few batches of documents.
+ for (int i = 0; i < 500; ++i) {
+ ASSERT(cursor->more());
+ cursor->next();
}
- };
- class EmptyTail : public ClientBase {
- public:
- ~EmptyTail() {
- _client.dropCollection( "unittests.querytests.EmptyTail" );
+ // Set the killop kill all flag, forcing the next get more to fail with a kill op
+ // exception.
+ getGlobalEnvironment()->setKillAllOperations();
+ while (cursor->more()) {
+ cursor->next();
}
- void run() {
- const char *ns = "unittests.querytests.EmptyTail";
- _client.createCollection( ns, 1900, true );
- auto_ptr< DBClientCursor > c = _client.query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
- ASSERT_EQUALS( 0, c->getCursorId() );
- ASSERT( c->isDead() );
- insert( ns, BSON( "a" << 0 ) );
- c = _client.query( ns, QUERY( "a" << 1 ).hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
- ASSERT( 0 != c->getCursorId() );
- ASSERT( !c->isDead() );
- }
- };
- class TailableDelete : public ClientBase {
- public:
- ~TailableDelete() {
- _client.dropCollection( "unittests.querytests.TailableDelete" );
- }
- void run() {
- const char *ns = "unittests.querytests.TailableDelete";
- _client.createCollection( ns, 8192, true, 2 );
- insert( ns, BSON( "a" << 0 ) );
- insert( ns, BSON( "a" << 1 ) );
- auto_ptr< DBClientCursor > c = _client.query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
- c->next();
- c->next();
- ASSERT( !c->more() );
- insert( ns, BSON( "a" << 2 ) );
- insert( ns, BSON( "a" << 3 ) );
-
- // This can either have been killed, or jumped to the right thing.
- // Key is that it can't skip.
- if ( c->more() ) {
- BSONObj x = c->next();
- ASSERT_EQUALS( 2, x["a"].numberInt() );
- }
+ // Revert the killop kill all flag.
+ getGlobalEnvironment()->unsetKillAllOperations();
- // Inserting a document into a capped collection can force another document out.
- // In this case, the capped collection has 2 documents, so inserting two more clobbers
- // whatever RecordId the underlying cursor had as its state.
- //
- // In the Cursor world, the ClientCursor was responsible for manipulating cursors. It
- // would detect that the cursor's "refloc" (translation: diskloc required to maintain
- // iteration state) was being clobbered and it would kill the cursor.
- //
- // In the Runner world there is no notion of a "refloc" and as such the invalidation
- // broadcast code doesn't know enough to know that the underlying collection iteration
- // can't proceed.
- // ASSERT_EQUALS( 0, c->getCursorId() );
+ // Check that the cursor has been removed.
+ {
+ AutoGetCollectionForRead ctx(&_txn, ns);
+ ASSERT(0 == ctx.getCollection()->getCursorManager()->numCursors());
}
- };
- class TailableDelete2 : public ClientBase {
- public:
- ~TailableDelete2() {
- _client.dropCollection( "unittests.querytests.TailableDelete" );
- }
- void run() {
- const char *ns = "unittests.querytests.TailableDelete";
- _client.createCollection( ns, 8192, true, 2 );
- insert( ns, BSON( "a" << 0 ) );
- insert( ns, BSON( "a" << 1 ) );
- auto_ptr< DBClientCursor > c = _client.query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
- c->next();
- c->next();
- ASSERT( !c->more() );
- insert( ns, BSON( "a" << 2 ) );
- insert( ns, BSON( "a" << 3 ) );
- insert( ns, BSON( "a" << 4 ) );
-
- // This can either have been killed, or jumped to the right thing.
- // Key is that it can't skip.
- if ( c->more() ) {
- BSONObj x = c->next();
- ASSERT_EQUALS( 2, x["a"].numberInt() );
- }
- }
- };
+ ASSERT_FALSE(CursorManager::eraseCursorGlobal(&_txn, cursorId));
+ // Check that a subsequent get more fails with the cursor removed.
+ ASSERT_THROWS(_client.getMore(ns, cursorId), UserException);
+ }
+};
- class TailableInsertDelete : public ClientBase {
- public:
- ~TailableInsertDelete() {
- _client.dropCollection( "unittests.querytests.TailableInsertDelete" );
- }
- void run() {
- const char *ns = "unittests.querytests.TailableInsertDelete";
- _client.createCollection( ns, 1330, true );
- insert( ns, BSON( "a" << 0 ) );
- insert( ns, BSON( "a" << 1 ) );
- auto_ptr< DBClientCursor > c = _client.query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
- c->next();
- c->next();
- ASSERT( !c->more() );
- insert( ns, BSON( "a" << 2 ) );
- _client.remove( ns, QUERY( "a" << 1 ) );
- ASSERT( c->more() );
- ASSERT_EQUALS( 2, c->next().getIntField( "a" ) );
- ASSERT( !c->more() );
+/**
+ * A get more exception caused by an invalid or unauthorized get more request does not cause
+ * the get more's ClientCursor to be destroyed. This prevents an unauthorized user from
+ * improperly killing a cursor by issuing an invalid get more request.
+ */
+class GetMoreInvalidRequest : public ClientBase {
+public:
+ ~GetMoreInvalidRequest() {
+ getGlobalEnvironment()->unsetKillAllOperations();
+ _client.dropCollection("unittests.querytests.GetMoreInvalidRequest");
+ }
+ void run() {
+ // Create a collection with some data.
+ const char* ns = "unittests.querytests.GetMoreInvalidRequest";
+ for (int i = 0; i < 1000; ++i) {
+ insert(ns, BSON("a" << i));
}
- };
- class TailCappedOnly : public ClientBase {
- public:
- ~TailCappedOnly() {
- _client.dropCollection( "unittest.querytests.TailCappedOnly" );
- }
- void run() {
- const char *ns = "unittests.querytests.TailCappedOnly";
- _client.insert( ns, BSONObj() );
- auto_ptr< DBClientCursor > c = _client.query( ns, BSONObj(), 0, 0, 0, QueryOption_CursorTailable );
- ASSERT( c->isDead() );
- }
- };
+ // Create a cursor on the collection, with a batch size of 200.
+ auto_ptr<DBClientCursor> cursor = _client.query(ns, "", 0, 0, 0, 0, 200);
+ CursorId cursorId = cursor->getCursorId();
- class TailableQueryOnId : public ClientBase {
- public:
- ~TailableQueryOnId() {
- _client.dropCollection( "unittests.querytests.TailableQueryOnId" );
+ // Count 500 results, spanning a few batches of documents.
+ int count = 0;
+ for (int i = 0; i < 500; ++i) {
+ ASSERT(cursor->more());
+ cursor->next();
+ ++count;
}
- void insertA(const char* ns, int a) {
- BSONObjBuilder b;
- b.appendOID("_id", 0, true);
- b.appendOID("value", 0, true);
- b.append("a", a);
- insert(ns, b.obj());
- }
-
- void run() {
- const char *ns = "unittests.querytests.TailableQueryOnId";
- BSONObj info;
- _client.runCommand( "unittests", BSON( "create" << "querytests.TailableQueryOnId" << "capped" << true << "size" << 8192 << "autoIndexId" << true ), info );
- insertA( ns, 0 );
- insertA( ns, 1 );
- auto_ptr< DBClientCursor > c1 = _client.query( ns, QUERY( "a" << GT << -1 ), 0, 0, 0, QueryOption_CursorTailable );
- OID id;
- id.init("000000000000000000000000");
- auto_ptr< DBClientCursor > c2 = _client.query( ns, QUERY( "value" << GT << id ), 0, 0, 0, QueryOption_CursorTailable );
- c1->next();
- c1->next();
- ASSERT( !c1->more() );
- c2->next();
- c2->next();
- ASSERT( !c2->more() );
- insertA( ns, 2 );
- ASSERT( c1->more() );
- ASSERT_EQUALS( 2, c1->next().getIntField( "a" ) );
- ASSERT( !c1->more() );
- ASSERT( c2->more() );
- ASSERT_EQUALS( 2, c2->next().getIntField( "a" ) ); // SERVER-645
- ASSERT( !c2->more() );
- ASSERT( !c2->isDead() );
- }
- };
+ // Send a get more with a namespace that is incorrect ('spoofed') for this cursor id.
+ // This is the invalid get more request described in the comment preceding this class.
+ _client.getMore("unittests.querytests.GetMoreInvalidRequest_WRONG_NAMESPACE_FOR_CURSOR",
+ cursor->getCursorId());
- class OplogReplayMode : public ClientBase {
- public:
- ~OplogReplayMode() {
- _client.dropCollection( "unittests.querytests.OplogReplayMode" );
- }
- void run() {
- const char *ns = "unittests.querytests.OplogReplayMode";
- insert( ns, BSON( "ts" << 0 ) );
- insert( ns, BSON( "ts" << 1 ) );
- insert( ns, BSON( "ts" << 2 ) );
- auto_ptr< DBClientCursor > c = _client.query( ns, QUERY( "ts" << GT << 1 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_OplogReplay );
- ASSERT( c->more() );
- ASSERT_EQUALS( 2, c->next().getIntField( "ts" ) );
- ASSERT( !c->more() );
-
- insert( ns, BSON( "ts" << 3 ) );
- c = _client.query( ns, QUERY( "ts" << GT << 1 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_OplogReplay );
- ASSERT( c->more() );
- ASSERT_EQUALS( 2, c->next().getIntField( "ts" ) );
- ASSERT( c->more() );
+ // Check that the cursor still exists
+ {
+ AutoGetCollectionForRead ctx(&_txn, ns);
+ ASSERT(1 == ctx.getCollection()->getCursorManager()->numCursors());
+ ASSERT(ctx.getCollection()->getCursorManager()->find(cursorId, false));
}
- };
- class OplogReplaySlaveReadTill : public ClientBase {
- public:
- ~OplogReplaySlaveReadTill() {
- _client.dropCollection( "unittests.querytests.OplogReplaySlaveReadTill" );
+ // Check that the cursor can be iterated until all documents are returned.
+ while (cursor->more()) {
+ cursor->next();
+ ++count;
}
- void run() {
- const char *ns = "unittests.querytests.OplogReplaySlaveReadTill";
- ScopedTransaction transaction(&_txn, MODE_IX);
- Lock::DBLock lk(_txn.lockState(), "unittests", MODE_X);
- Client::Context ctx(&_txn, ns );
-
- BSONObj info;
- _client.runCommand( "unittests",
- BSON( "create" << "querytests.OplogReplaySlaveReadTill" <<
- "capped" << true << "size" << 8192 ),
- info );
-
- Date_t one = getNextGlobalOptime().asDate();
- Date_t two = getNextGlobalOptime().asDate();
- Date_t three = getNextGlobalOptime().asDate();
- insert( ns, BSON( "ts" << one ) );
- insert( ns, BSON( "ts" << two ) );
- insert( ns, BSON( "ts" << three ) );
- auto_ptr<DBClientCursor> c =
- _client.query( ns, QUERY( "ts" << GTE << two ).hint( BSON( "$natural" << 1 ) ),
- 0, 0, 0, QueryOption_OplogReplay | QueryOption_CursorTailable );
- ASSERT( c->more() );
- ASSERT_EQUALS( two, c->next()["ts"].Date() );
- long long cursorId = c->getCursorId();
-
- ClientCursorPin clientCursor( ctx.db()->getCollection( ns )->getCursorManager(),
- cursorId );
- ASSERT_EQUALS( three.millis, clientCursor.c()->getSlaveReadTill().asDate() );
- }
- };
+ ASSERT_EQUALS(1000, count);
+ }
+};
+
+class PositiveLimit : public ClientBase {
+public:
+ const char* ns;
+ PositiveLimit() : ns("unittests.querytests.PositiveLimit") {}
+ ~PositiveLimit() {
+ _client.dropCollection(ns);
+ }
- class OplogReplayExplain : public ClientBase {
- public:
- ~OplogReplayExplain() {
- _client.dropCollection( "unittests.querytests.OplogReplayExplain" );
- }
- void run() {
- const char *ns = "unittests.querytests.OplogReplayExplain";
- insert( ns, BSON( "ts" << 0 ) );
- insert( ns, BSON( "ts" << 1 ) );
- insert( ns, BSON( "ts" << 2 ) );
- auto_ptr< DBClientCursor > c = _client.query(
- ns, QUERY( "ts" << GT << 1 ).hint( BSON( "$natural" << 1 ) ).explain(),
- 0, 0, 0, QueryOption_OplogReplay );
- ASSERT( c->more() );
-
- // Check number of results and filterSet flag in explain.
- // filterSet is not available in oplog replay mode.
- BSONObj explainObj = c->next();
- ASSERT( explainObj.hasField("executionStats") );
- BSONObj execStats = explainObj["executionStats"].Obj();
- ASSERT_EQUALS( 1, execStats.getIntField( "nReturned" ) );
-
- ASSERT( !c->more() );
- }
- };
+ void testLimit(int limit) {
+ ASSERT_EQUALS(_client.query(ns, BSONObj(), limit)->itcount(), limit);
+ }
+ void run() {
+ for (int i = 0; i < 1000; i++)
+ insert(ns, BSON(GENOID << "i" << i));
+
+ ASSERT_EQUALS(_client.query(ns, BSONObj(), 1)->itcount(), 1);
+ ASSERT_EQUALS(_client.query(ns, BSONObj(), 10)->itcount(), 10);
+ ASSERT_EQUALS(_client.query(ns, BSONObj(), 101)->itcount(), 101);
+ ASSERT_EQUALS(_client.query(ns, BSONObj(), 999)->itcount(), 999);
+ ASSERT_EQUALS(_client.query(ns, BSONObj(), 1000)->itcount(), 1000);
+ ASSERT_EQUALS(_client.query(ns, BSONObj(), 1001)->itcount(), 1000);
+ ASSERT_EQUALS(_client.query(ns, BSONObj(), 0)->itcount(), 1000);
+ }
+};
- class BasicCount : public ClientBase {
- public:
- ~BasicCount() {
- _client.dropCollection( "unittests.querytests.BasicCount" );
- }
- void run() {
- const char *ns = "unittests.querytests.BasicCount";
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON( "a" << 1 ) ));
- count( 0 );
- insert( ns, BSON( "a" << 3 ) );
- count( 0 );
- insert( ns, BSON( "a" << 4 ) );
- count( 1 );
- insert( ns, BSON( "a" << 5 ) );
- count( 1 );
- insert( ns, BSON( "a" << 4 ) );
- count( 2 );
- }
- private:
- void count( unsigned long long c ) {
- ASSERT_EQUALS( c, _client.count( "unittests.querytests.BasicCount", BSON( "a" << 4 ) ) );
- }
- };
+class ReturnOneOfManyAndTail : public ClientBase {
+public:
+ ~ReturnOneOfManyAndTail() {
+ _client.dropCollection("unittests.querytests.ReturnOneOfManyAndTail");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.ReturnOneOfManyAndTail";
+ _client.createCollection(ns, 1024, true);
+ insert(ns, BSON("a" << 0));
+ insert(ns, BSON("a" << 1));
+ insert(ns, BSON("a" << 2));
+ auto_ptr<DBClientCursor> c =
+ _client.query(ns,
+ QUERY("a" << GT << 0).hint(BSON("$natural" << 1)),
+ 1,
+ 0,
+ 0,
+ QueryOption_CursorTailable);
+ // If only one result is requested, a cursor is not saved.
+ ASSERT_EQUALS(0, c->getCursorId());
+ ASSERT(c->more());
+ ASSERT_EQUALS(1, c->next().getIntField("a"));
+ }
+};
- class ArrayId : public ClientBase {
- public:
- ~ArrayId() {
- _client.dropCollection( "unittests.querytests.ArrayId" );
- }
- void run() {
- const char *ns = "unittests.querytests.ArrayId";
- ASSERT_OK(dbtests::createIndex( &_txn, ns, BSON( "_id" << 1 ) ));
- ASSERT( !error() );
- _client.insert( ns, fromjson( "{'_id':[1,2]}" ) );
- ASSERT( error() );
- }
- };
+class TailNotAtEnd : public ClientBase {
+public:
+ ~TailNotAtEnd() {
+ _client.dropCollection("unittests.querytests.TailNotAtEnd");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.TailNotAtEnd";
+ _client.createCollection(ns, 2047, true);
+ insert(ns, BSON("a" << 0));
+ insert(ns, BSON("a" << 1));
+ insert(ns, BSON("a" << 2));
+ auto_ptr<DBClientCursor> c = _client.query(
+ ns, Query().hint(BSON("$natural" << 1)), 2, 0, 0, QueryOption_CursorTailable);
+ ASSERT(0 != c->getCursorId());
+ while (c->more())
+ c->next();
+ ASSERT(0 != c->getCursorId());
+ insert(ns, BSON("a" << 3));
+ insert(ns, BSON("a" << 4));
+ insert(ns, BSON("a" << 5));
+ insert(ns, BSON("a" << 6));
+ ASSERT(c->more());
+ ASSERT_EQUALS(3, c->next().getIntField("a"));
+ }
+};
- class UnderscoreNs : public ClientBase {
- public:
- ~UnderscoreNs() {
- _client.dropCollection( "unittests.querytests._UnderscoreNs" );
- }
- void run() {
- ASSERT( !error() );
- const char *ns = "unittests.querytests._UnderscoreNs";
- ASSERT( _client.findOne( ns, "{}" ).isEmpty() );
- _client.insert( ns, BSON( "a" << 1 ) );
- ASSERT_EQUALS( 1, _client.findOne( ns, "{}" ).getIntField( "a" ) );
- ASSERT( !error() );
- }
- };
+class EmptyTail : public ClientBase {
+public:
+ ~EmptyTail() {
+ _client.dropCollection("unittests.querytests.EmptyTail");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.EmptyTail";
+ _client.createCollection(ns, 1900, true);
+ auto_ptr<DBClientCursor> c = _client.query(
+ ns, Query().hint(BSON("$natural" << 1)), 2, 0, 0, QueryOption_CursorTailable);
+ ASSERT_EQUALS(0, c->getCursorId());
+ ASSERT(c->isDead());
+ insert(ns, BSON("a" << 0));
+ c = _client.query(
+ ns, QUERY("a" << 1).hint(BSON("$natural" << 1)), 2, 0, 0, QueryOption_CursorTailable);
+ ASSERT(0 != c->getCursorId());
+ ASSERT(!c->isDead());
+ }
+};
- class EmptyFieldSpec : public ClientBase {
- public:
- ~EmptyFieldSpec() {
- _client.dropCollection( "unittests.querytests.EmptyFieldSpec" );
- }
- void run() {
- const char *ns = "unittests.querytests.EmptyFieldSpec";
- _client.insert( ns, BSON( "a" << 1 ) );
- ASSERT( !_client.findOne( ns, "" ).isEmpty() );
- BSONObj empty;
- ASSERT( !_client.findOne( ns, "", &empty ).isEmpty() );
- }
- };
+class TailableDelete : public ClientBase {
+public:
+ ~TailableDelete() {
+ _client.dropCollection("unittests.querytests.TailableDelete");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.TailableDelete";
+ _client.createCollection(ns, 8192, true, 2);
+ insert(ns, BSON("a" << 0));
+ insert(ns, BSON("a" << 1));
+ auto_ptr<DBClientCursor> c = _client.query(
+ ns, Query().hint(BSON("$natural" << 1)), 2, 0, 0, QueryOption_CursorTailable);
+ c->next();
+ c->next();
+ ASSERT(!c->more());
+ insert(ns, BSON("a" << 2));
+ insert(ns, BSON("a" << 3));
+
+ // This can either have been killed, or jumped to the right thing.
+ // Key is that it can't skip.
+ if (c->more()) {
+ BSONObj x = c->next();
+ ASSERT_EQUALS(2, x["a"].numberInt());
+ }
+
+ // Inserting a document into a capped collection can force another document out.
+ // In this case, the capped collection has 2 documents, so inserting two more clobbers
+ // whatever RecordId the underlying cursor had as its state.
+ //
+ // In the Cursor world, the ClientCursor was responsible for manipulating cursors. It
+ // would detect that the cursor's "refloc" (translation: diskloc required to maintain
+ // iteration state) was being clobbered and it would kill the cursor.
+ //
+ // In the Runner world there is no notion of a "refloc" and as such the invalidation
+ // broadcast code doesn't know enough to know that the underlying collection iteration
+ // can't proceed.
+ // ASSERT_EQUALS( 0, c->getCursorId() );
+ }
+};
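The tailable tests above all assume the same consumption pattern: on a tailable cursor, more() returning false means only that the reader has caught up, while isDead() signals that the cursor is gone for good. A minimal polling sketch, with 'client' and 'ns' assumed:

    // Sketch: polling a tailable cursor on a capped collection.
    auto_ptr<DBClientCursor> c = client.query(
        ns, Query().hint(BSON("$natural" << 1)), 0, 0, 0, QueryOption_CursorTailable);
    while (!c->isDead()) {
        if (c->more()) {
            BSONObj doc = c->next();  // a newly appended document
        }
        // else: caught up; a real reader would back off briefly and retry.
    }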
- class MultiNe : public ClientBase {
- public:
- ~MultiNe() {
- _client.dropCollection( "unittests.querytests.Ne" );
- }
- void run() {
- const char *ns = "unittests.querytests.Ne";
- _client.insert( ns, fromjson( "{a:[1,2]}" ) );
- ASSERT( _client.findOne( ns, fromjson( "{a:{$ne:1}}" ) ).isEmpty() );
- BSONObj spec = fromjson( "{a:{$ne:1,$ne:2}}" );
- ASSERT( _client.findOne( ns, spec ).isEmpty() );
+class TailableDelete2 : public ClientBase {
+public:
+ ~TailableDelete2() {
+ _client.dropCollection("unittests.querytests.TailableDelete");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.TailableDelete";
+ _client.createCollection(ns, 8192, true, 2);
+ insert(ns, BSON("a" << 0));
+ insert(ns, BSON("a" << 1));
+ auto_ptr<DBClientCursor> c = _client.query(
+ ns, Query().hint(BSON("$natural" << 1)), 2, 0, 0, QueryOption_CursorTailable);
+ c->next();
+ c->next();
+ ASSERT(!c->more());
+ insert(ns, BSON("a" << 2));
+ insert(ns, BSON("a" << 3));
+ insert(ns, BSON("a" << 4));
+
+ // This can either have been killed, or jumped to the right thing.
+ // Key is that it can't skip.
+ if (c->more()) {
+ BSONObj x = c->next();
+ ASSERT_EQUALS(2, x["a"].numberInt());
}
- };
+ }
+};
- class EmbeddedNe : public ClientBase {
- public:
- ~EmbeddedNe() {
- _client.dropCollection( "unittests.querytests.NestedNe" );
- }
- void run() {
- const char *ns = "unittests.querytests.NestedNe";
- _client.insert( ns, fromjson( "{a:[{b:1},{b:2}]}" ) );
- ASSERT( _client.findOne( ns, fromjson( "{'a.b':{$ne:1}}" ) ).isEmpty() );
- }
- };
- class EmbeddedNumericTypes : public ClientBase {
- public:
- ~EmbeddedNumericTypes() {
- _client.dropCollection( "unittests.querytests.NumericEmbedded" );
- }
- void run() {
- const char *ns = "unittests.querytests.NumericEmbedded";
- _client.insert( ns, BSON( "a" << BSON ( "b" << 1 ) ) );
- ASSERT( ! _client.findOne( ns, BSON( "a" << BSON ( "b" << 1.0 ) ) ).isEmpty() );
- ASSERT_OK(dbtests::createIndex( &_txn, ns , BSON( "a" << 1 ) ));
- ASSERT( ! _client.findOne( ns, BSON( "a" << BSON ( "b" << 1.0 ) ) ).isEmpty() );
- }
- };
+class TailableInsertDelete : public ClientBase {
+public:
+ ~TailableInsertDelete() {
+ _client.dropCollection("unittests.querytests.TailableInsertDelete");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.TailableInsertDelete";
+ _client.createCollection(ns, 1330, true);
+ insert(ns, BSON("a" << 0));
+ insert(ns, BSON("a" << 1));
+ auto_ptr<DBClientCursor> c = _client.query(
+ ns, Query().hint(BSON("$natural" << 1)), 2, 0, 0, QueryOption_CursorTailable);
+ c->next();
+ c->next();
+ ASSERT(!c->more());
+ insert(ns, BSON("a" << 2));
+ _client.remove(ns, QUERY("a" << 1));
+ ASSERT(c->more());
+ ASSERT_EQUALS(2, c->next().getIntField("a"));
+ ASSERT(!c->more());
+ }
+};
- class AutoResetIndexCache : public ClientBase {
- public:
- ~AutoResetIndexCache() {
- _client.dropCollection( "unittests.querytests.AutoResetIndexCache" );
- }
- static const char *ns() { return "unittests.querytests.AutoResetIndexCache"; }
- static const char *idxNs() { return "unittests.system.indexes"; }
- void index() { ASSERT_EQUALS(2u, _client.getIndexSpecs(ns()).size()); }
- void noIndex() { ASSERT_EQUALS(0u, _client.getIndexSpecs(ns()).size()); }
- void checkIndex() {
- ASSERT_OK(dbtests::createIndex( &_txn, ns() , BSON( "a" << 1 ) ));
- index();
- }
- void run() {
- _client.dropDatabase( "unittests" );
- noIndex();
- checkIndex();
- _client.dropCollection( ns() );
- noIndex();
- checkIndex();
- _client.dropDatabase( "unittests" );
- noIndex();
- checkIndex();
- }
- };
+class TailCappedOnly : public ClientBase {
+public:
+ ~TailCappedOnly() {
+ _client.dropCollection("unittest.querytests.TailCappedOnly");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.TailCappedOnly";
+ _client.insert(ns, BSONObj());
+ auto_ptr<DBClientCursor> c =
+ _client.query(ns, BSONObj(), 0, 0, 0, QueryOption_CursorTailable);
+ ASSERT(c->isDead());
+ }
+};
- class UniqueIndex : public ClientBase {
- public:
- ~UniqueIndex() {
- _client.dropCollection( "unittests.querytests.UniqueIndex" );
- }
- void run() {
- const char *ns = "unittests.querytests.UniqueIndex";
- ASSERT_OK(dbtests::createIndex( &_txn, ns , BSON( "a" << 1 ), true ));
- _client.insert( ns, BSON( "a" << 4 << "b" << 2 ) );
- _client.insert( ns, BSON( "a" << 4 << "b" << 3 ) );
- ASSERT_EQUALS( 1U, _client.count( ns, BSONObj() ) );
- _client.dropCollection( ns );
- ASSERT_OK(dbtests::createIndex( &_txn, ns , BSON( "b" << 1 ), true ));
- _client.insert( ns, BSON( "a" << 4 << "b" << 2 ) );
- _client.insert( ns, BSON( "a" << 4 << "b" << 3 ) );
- ASSERT_EQUALS( 2U, _client.count( ns, BSONObj() ) );
- }
- };
+class TailableQueryOnId : public ClientBase {
+public:
+ ~TailableQueryOnId() {
+ _client.dropCollection("unittests.querytests.TailableQueryOnId");
+ }
- class UniqueIndexPreexistingData : public ClientBase {
- public:
- ~UniqueIndexPreexistingData() {
- _client.dropCollection( "unittests.querytests.UniqueIndexPreexistingData" );
- }
- void run() {
- const char *ns = "unittests.querytests.UniqueIndexPreexistingData";
- _client.insert( ns, BSON( "a" << 4 << "b" << 2 ) );
- _client.insert( ns, BSON( "a" << 4 << "b" << 3 ) );
- ASSERT_EQUALS(ErrorCodes::DuplicateKey,
- dbtests::createIndex( &_txn, ns , BSON( "a" << 1 ), true ));
- ASSERT_EQUALS( 0U, _client.count( "unittests.system.indexes", BSON( "ns" << ns << "name" << NE << "_id_" ) ) );
- }
- };
+ void insertA(const char* ns, int a) {
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ b.appendOID("value", 0, true);
+ b.append("a", a);
+ insert(ns, b.obj());
+ }
- class SubobjectInArray : public ClientBase {
- public:
- ~SubobjectInArray() {
- _client.dropCollection( "unittests.querytests.SubobjectInArray" );
- }
- void run() {
- const char *ns = "unittests.querytests.SubobjectInArray";
- _client.insert( ns, fromjson( "{a:[{b:{c:1}}]}" ) );
- ASSERT( !_client.findOne( ns, BSON( "a.b.c" << 1 ) ).isEmpty() );
- ASSERT( !_client.findOne( ns, fromjson( "{'a.c':null}" ) ).isEmpty() );
- }
- };
+ void run() {
+ const char* ns = "unittests.querytests.TailableQueryOnId";
+ BSONObj info;
+ _client.runCommand("unittests",
+ BSON("create"
+ << "querytests.TailableQueryOnId"
+ << "capped" << true << "size" << 8192 << "autoIndexId" << true),
+ info);
+ insertA(ns, 0);
+ insertA(ns, 1);
+ auto_ptr<DBClientCursor> c1 =
+ _client.query(ns, QUERY("a" << GT << -1), 0, 0, 0, QueryOption_CursorTailable);
+ OID id;
+ id.init("000000000000000000000000");
+ auto_ptr<DBClientCursor> c2 =
+ _client.query(ns, QUERY("value" << GT << id), 0, 0, 0, QueryOption_CursorTailable);
+ c1->next();
+ c1->next();
+ ASSERT(!c1->more());
+ c2->next();
+ c2->next();
+ ASSERT(!c2->more());
+ insertA(ns, 2);
+ ASSERT(c1->more());
+ ASSERT_EQUALS(2, c1->next().getIntField("a"));
+ ASSERT(!c1->more());
+ ASSERT(c2->more());
+ ASSERT_EQUALS(2, c2->next().getIntField("a")); // SERVER-645
+ ASSERT(!c2->more());
+ ASSERT(!c2->isDead());
+ }
+};
- class Size : public ClientBase {
- public:
- ~Size() {
- _client.dropCollection( "unittests.querytests.Size" );
- }
- void run() {
- const char *ns = "unittests.querytests.Size";
- _client.insert( ns, fromjson( "{a:[1,2,3]}" ) );
- ASSERT_OK(dbtests::createIndex( &_txn, ns , BSON( "a" << 1 ) ));
- ASSERT( _client.query( ns, QUERY( "a" << mongo::BSIZE << 3 ).hint( BSON( "a" << 1 ) ) )->more() );
- }
- };
+class OplogReplayMode : public ClientBase {
+public:
+ ~OplogReplayMode() {
+ _client.dropCollection("unittests.querytests.OplogReplayMode");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.OplogReplayMode";
+ insert(ns, BSON("ts" << 0));
+ insert(ns, BSON("ts" << 1));
+ insert(ns, BSON("ts" << 2));
+ auto_ptr<DBClientCursor> c =
+ _client.query(ns,
+ QUERY("ts" << GT << 1).hint(BSON("$natural" << 1)),
+ 0,
+ 0,
+ 0,
+ QueryOption_OplogReplay);
+ ASSERT(c->more());
+ ASSERT_EQUALS(2, c->next().getIntField("ts"));
+ ASSERT(!c->more());
+
+ insert(ns, BSON("ts" << 3));
+ c = _client.query(ns,
+ QUERY("ts" << GT << 1).hint(BSON("$natural" << 1)),
+ 0,
+ 0,
+ 0,
+ QueryOption_OplogReplay);
+ ASSERT(c->more());
+ ASSERT_EQUALS(2, c->next().getIntField("ts"));
+ ASSERT(c->more());
+ }
+};
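For context on QueryOption_OplogReplay, exercised above: it tells the server that the filter has a lower bound on a monotonically increasing 'ts' field, so the scan can skip ahead to that bound instead of reading the capped collection from the start. A hedged sketch of typical use (the oplog namespace and 'lastSeenTs' are assumptions):

    // Sketch: resuming an oplog tail from a known timestamp.
    auto_ptr<DBClientCursor> c = client.query(
        "local.oplog.rs",
        QUERY("ts" << GT << lastSeenTs).hint(BSON("$natural" << 1)),
        0, 0, 0,
        QueryOption_OplogReplay | QueryOption_CursorTailable);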
- class FullArray : public ClientBase {
- public:
- ~FullArray() {
- _client.dropCollection( "unittests.querytests.IndexedArray" );
- }
- void run() {
- const char *ns = "unittests.querytests.IndexedArray";
- _client.insert( ns, fromjson( "{a:[1,2,3]}" ) );
- ASSERT( _client.query( ns, Query( "{a:[1,2,3]}" ) )->more() );
- ASSERT_OK(dbtests::createIndex( &_txn, ns , BSON( "a" << 1 ) ));
- ASSERT( _client.query( ns, Query( "{a:{$in:[1,[1,2,3]]}}" ).hint( BSON( "a" << 1 ) ) )->more() );
- ASSERT( _client.query( ns, Query( "{a:[1,2,3]}" ).hint( BSON( "a" << 1 ) ) )->more() ); // SERVER-146
- }
- };
+class OplogReplaySlaveReadTill : public ClientBase {
+public:
+ ~OplogReplaySlaveReadTill() {
+ _client.dropCollection("unittests.querytests.OplogReplaySlaveReadTill");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.OplogReplaySlaveReadTill";
+ ScopedTransaction transaction(&_txn, MODE_IX);
+ Lock::DBLock lk(_txn.lockState(), "unittests", MODE_X);
+ Client::Context ctx(&_txn, ns);
+
+ BSONObj info;
+ _client.runCommand("unittests",
+ BSON("create"
+ << "querytests.OplogReplaySlaveReadTill"
+ << "capped" << true << "size" << 8192),
+ info);
+
+ Date_t one = getNextGlobalOptime().asDate();
+ Date_t two = getNextGlobalOptime().asDate();
+ Date_t three = getNextGlobalOptime().asDate();
+ insert(ns, BSON("ts" << one));
+ insert(ns, BSON("ts" << two));
+ insert(ns, BSON("ts" << three));
+ auto_ptr<DBClientCursor> c =
+ _client.query(ns,
+ QUERY("ts" << GTE << two).hint(BSON("$natural" << 1)),
+ 0,
+ 0,
+ 0,
+ QueryOption_OplogReplay | QueryOption_CursorTailable);
+ ASSERT(c->more());
+ ASSERT_EQUALS(two, c->next()["ts"].Date());
+ long long cursorId = c->getCursorId();
+
+ ClientCursorPin clientCursor(ctx.db()->getCollection(ns)->getCursorManager(), cursorId);
+ ASSERT_EQUALS(three.millis, clientCursor.c()->getSlaveReadTill().asDate());
+ }
+};
- class InsideArray : public ClientBase {
- public:
- ~InsideArray() {
- _client.dropCollection( "unittests.querytests.InsideArray" );
- }
- void run() {
- const char *ns = "unittests.querytests.InsideArray";
- _client.insert( ns, fromjson( "{a:[[1],2]}" ) );
- check( "$natural" );
- ASSERT_OK(dbtests::createIndex( &_txn, ns , BSON( "a" << 1 ) ));
- check( "a" ); // SERVER-146
- }
- private:
- void check( const string &hintField ) {
- const char *ns = "unittests.querytests.InsideArray";
- ASSERT( _client.query( ns, Query( "{a:[[1],2]}" ).hint( BSON( hintField << 1 ) ) )->more() );
- ASSERT( _client.query( ns, Query( "{a:[1]}" ).hint( BSON( hintField << 1 ) ) )->more() );
- ASSERT( _client.query( ns, Query( "{a:2}" ).hint( BSON( hintField << 1 ) ) )->more() );
- ASSERT( !_client.query( ns, Query( "{a:1}" ).hint( BSON( hintField << 1 ) ) )->more() );
- }
- };
+class OplogReplayExplain : public ClientBase {
+public:
+ ~OplogReplayExplain() {
+ _client.dropCollection("unittests.querytests.OplogReplayExplain");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.OplogReplayExplain";
+ insert(ns, BSON("ts" << 0));
+ insert(ns, BSON("ts" << 1));
+ insert(ns, BSON("ts" << 2));
+ auto_ptr<DBClientCursor> c =
+ _client.query(ns,
+ QUERY("ts" << GT << 1).hint(BSON("$natural" << 1)).explain(),
+ 0,
+ 0,
+ 0,
+ QueryOption_OplogReplay);
+ ASSERT(c->more());
+
+ // Check number of results and filterSet flag in explain.
+ // filterSet is not available in oplog replay mode.
+ BSONObj explainObj = c->next();
+ ASSERT(explainObj.hasField("executionStats"));
+ BSONObj execStats = explainObj["executionStats"].Obj();
+ ASSERT_EQUALS(1, execStats.getIntField("nReturned"));
+
+ ASSERT(!c->more());
+ }
+};
- class IndexInsideArrayCorrect : public ClientBase {
- public:
- ~IndexInsideArrayCorrect() {
- _client.dropCollection( "unittests.querytests.IndexInsideArrayCorrect" );
- }
- void run() {
- const char *ns = "unittests.querytests.IndexInsideArrayCorrect";
- _client.insert( ns, fromjson( "{'_id':1,a:[1]}" ) );
- _client.insert( ns, fromjson( "{'_id':2,a:[[1]]}" ) );
- ASSERT_OK(dbtests::createIndex( &_txn, ns , BSON( "a" << 1 ) ));
- ASSERT_EQUALS( 1, _client.query( ns, Query( "{a:[1]}" ).hint( BSON( "a" << 1 ) ) )->next().getIntField( "_id" ) );
- }
- };
+class BasicCount : public ClientBase {
+public:
+ ~BasicCount() {
+ _client.dropCollection("unittests.querytests.BasicCount");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.BasicCount";
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ count(0);
+ insert(ns, BSON("a" << 3));
+ count(0);
+ insert(ns, BSON("a" << 4));
+ count(1);
+ insert(ns, BSON("a" << 5));
+ count(1);
+ insert(ns, BSON("a" << 4));
+ count(2);
+ }
- class SubobjArr : public ClientBase {
- public:
- ~SubobjArr() {
- _client.dropCollection( "unittests.querytests.SubobjArr" );
- }
- void run() {
- const char *ns = "unittests.querytests.SubobjArr";
- _client.insert( ns, fromjson( "{a:[{b:[1]}]}" ) );
- check( "$natural" );
- ASSERT_OK(dbtests::createIndex( &_txn, ns , BSON( "a" << 1 ) ));
- check( "a" );
- }
- private:
- void check( const string &hintField ) {
- const char *ns = "unittests.querytests.SubobjArr";
- ASSERT( _client.query( ns, Query( "{'a.b':1}" ).hint( BSON( hintField << 1 ) ) )->more() );
- ASSERT( _client.query( ns, Query( "{'a.b':[1]}" ).hint( BSON( hintField << 1 ) ) )->more() );
- }
- };
+private:
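+    // Asserts that exactly c documents currently match {a: 4}.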
+ void count(unsigned long long c) {
+ ASSERT_EQUALS(c, _client.count("unittests.querytests.BasicCount", BSON("a" << 4)));
+ }
+};
- class MinMax : public ClientBase {
- public:
- MinMax() : ns( "unittests.querytests.MinMax" ) {}
- ~MinMax() {
- _client.dropCollection( "unittests.querytests.MinMax" );
- }
- void run() {
- ASSERT_OK(dbtests::createIndex( &_txn, ns, BSON( "a" << 1 << "b" << 1 ) ));
- _client.insert( ns, BSON( "a" << 1 << "b" << 1 ) );
- _client.insert( ns, BSON( "a" << 1 << "b" << 2 ) );
- _client.insert( ns, BSON( "a" << 2 << "b" << 1 ) );
- _client.insert( ns, BSON( "a" << 2 << "b" << 2 ) );
-
- ASSERT_EQUALS( 4, count( _client.query( ns, BSONObj() ) ) );
- BSONObj hints[] = { BSONObj(), BSON( "a" << 1 << "b" << 1 ) };
- for( int i = 0; i < 2; ++i ) {
- check( 0, 0, 3, 3, 4, hints[ i ] );
- check( 1, 1, 2, 2, 3, hints[ i ] );
- check( 1, 2, 2, 2, 2, hints[ i ] );
- check( 1, 2, 2, 1, 1, hints[ i ] );
-
- auto_ptr< DBClientCursor > c = query( 1, 2, 2, 2, hints[ i ] );
- BSONObj obj = c->next();
- ASSERT_EQUALS( 1, obj.getIntField( "a" ) );
- ASSERT_EQUALS( 2, obj.getIntField( "b" ) );
- obj = c->next();
- ASSERT_EQUALS( 2, obj.getIntField( "a" ) );
- ASSERT_EQUALS( 1, obj.getIntField( "b" ) );
- ASSERT( !c->more() );
- }
- }
- private:
- auto_ptr< DBClientCursor > query( int minA, int minB, int maxA, int maxB, const BSONObj &hint ) {
- Query q;
- q = q.minKey( BSON( "a" << minA << "b" << minB ) ).maxKey( BSON( "a" << maxA << "b" << maxB ) );
- if ( !hint.isEmpty() )
- q.hint( hint );
- return _client.query( ns, q );
- }
- void check( int minA, int minB, int maxA, int maxB, int expectedCount, const BSONObj &hint = empty_ ) {
- ASSERT_EQUALS( expectedCount, count( query( minA, minB, maxA, maxB, hint ) ) );
- }
- int count( auto_ptr< DBClientCursor > c ) {
- int ret = 0;
- while( c->more() ) {
- ++ret;
- c->next();
- }
- return ret;
- }
- const char *ns;
- static BSONObj empty_;
- };
- BSONObj MinMax::empty_;
-
- class MatchCodeCodeWScope : public ClientBase {
- public:
- MatchCodeCodeWScope() : _ns( "unittests.querytests.MatchCodeCodeWScope" ) {}
- ~MatchCodeCodeWScope() {
- _client.dropCollection( "unittests.querytests.MatchCodeCodeWScope" );
- }
- void run() {
- checkMatch();
- ASSERT_OK(dbtests::createIndex( &_txn, _ns, BSON( "a" << 1 ) ));
- checkMatch();
- }
- private:
- void checkMatch() {
- _client.remove( _ns, BSONObj() );
-
- _client.insert( _ns, code() );
- _client.insert( _ns, codeWScope() );
-
- ASSERT_EQUALS( 1U, _client.count( _ns, code() ) );
- ASSERT_EQUALS( 1U, _client.count( _ns, codeWScope() ) );
-
- ASSERT_EQUALS( 1U, _client.count( _ns, BSON( "a" << BSON( "$type" << (int)Code ) ) ) );
- ASSERT_EQUALS( 1U, _client.count( _ns, BSON( "a" << BSON( "$type" << (int)CodeWScope ) ) ) );
- }
- BSONObj code() const {
- BSONObjBuilder codeBuilder;
- codeBuilder.appendCode( "a", "return 1;" );
- return codeBuilder.obj();
- }
- BSONObj codeWScope() const {
- BSONObjBuilder codeWScopeBuilder;
- codeWScopeBuilder.appendCodeWScope( "a", "return 1;", BSONObj() );
- return codeWScopeBuilder.obj();
- }
- const char *_ns;
- };
-
- class MatchDBRefType : public ClientBase {
- public:
- MatchDBRefType() : _ns( "unittests.querytests.MatchDBRefType" ) {}
- ~MatchDBRefType() {
- _client.dropCollection( "unittests.querytests.MatchDBRefType" );
- }
- void run() {
- checkMatch();
- ASSERT_OK(dbtests::createIndex( &_txn, _ns, BSON( "a" << 1 ) ));
- checkMatch();
- }
- private:
- void checkMatch() {
- _client.remove( _ns, BSONObj() );
- _client.insert( _ns, dbref() );
- ASSERT_EQUALS( 1U, _client.count( _ns, dbref() ) );
- ASSERT_EQUALS( 1U, _client.count( _ns, BSON( "a" << BSON( "$type" << (int)DBRef ) ) ) );
- }
- BSONObj dbref() const {
- BSONObjBuilder b;
- OID oid;
- b.appendDBRef( "a", "ns", oid );
- return b.obj();
- }
- const char *_ns;
- };
-
- class DirectLocking : public ClientBase {
- public:
- void run() {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- Client::Context ctx(&_txn, "unittests.DirectLocking");
- _client.remove( "a.b", BSONObj() );
- ASSERT_EQUALS( "unittests", ctx.db()->name() );
- }
- const char *ns;
- };
+class ArrayId : public ClientBase {
+public:
+ ~ArrayId() {
+ _client.dropCollection("unittests.querytests.ArrayId");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.ArrayId";
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("_id" << 1)));
+ ASSERT(!error());
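+        // An array is not a valid _id value, so this insert must flag an error.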
+ _client.insert(ns, fromjson("{'_id':[1,2]}"));
+ ASSERT(error());
+ }
+};
- class FastCountIn : public ClientBase {
- public:
- ~FastCountIn() {
- _client.dropCollection( "unittests.querytests.FastCountIn" );
- }
- void run() {
- const char *ns = "unittests.querytests.FastCountIn";
- _client.insert( ns, BSON( "i" << "a" ) );
- ASSERT_OK(dbtests::createIndex( &_txn, ns, BSON( "i" << 1 ) ));
- ASSERT_EQUALS( 1U, _client.count( ns, fromjson( "{i:{$in:['a']}}" ) ) );
- }
- };
+class UnderscoreNs : public ClientBase {
+public:
+ ~UnderscoreNs() {
+ _client.dropCollection("unittests.querytests._UnderscoreNs");
+ }
+ void run() {
+ ASSERT(!error());
+ const char* ns = "unittests.querytests._UnderscoreNs";
+ ASSERT(_client.findOne(ns, "{}").isEmpty());
+ _client.insert(ns, BSON("a" << 1));
+ ASSERT_EQUALS(1, _client.findOne(ns, "{}").getIntField("a"));
+ ASSERT(!error());
+ }
+};
- class EmbeddedArray : public ClientBase {
- public:
- ~EmbeddedArray() {
- _client.dropCollection( "unittests.querytests.EmbeddedArray" );
- }
- void run() {
- const char *ns = "unittests.querytests.EmbeddedArray";
- _client.insert( ns, fromjson( "{foo:{bar:['spam']}}" ) );
- _client.insert( ns, fromjson( "{foo:{bar:['spam','eggs']}}" ) );
- _client.insert( ns, fromjson( "{bar:['spam']}" ) );
- _client.insert( ns, fromjson( "{bar:['spam','eggs']}" ) );
- ASSERT_EQUALS( 2U, _client.count( ns, BSON( "bar" << "spam" ) ) );
- ASSERT_EQUALS( 2U, _client.count( ns, BSON( "foo.bar" << "spam" ) ) );
- }
- };
+class EmptyFieldSpec : public ClientBase {
+public:
+ ~EmptyFieldSpec() {
+ _client.dropCollection("unittests.querytests.EmptyFieldSpec");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.EmptyFieldSpec";
+ _client.insert(ns, BSON("a" << 1));
+ ASSERT(!_client.findOne(ns, "").isEmpty());
+ BSONObj empty;
+ ASSERT(!_client.findOne(ns, "", &empty).isEmpty());
+ }
+};
- class DifferentNumbers : public ClientBase {
- public:
- ~DifferentNumbers() {
- _client.dropCollection( "unittests.querytests.DifferentNumbers" );
- }
- void t( const char * ns ) {
- auto_ptr< DBClientCursor > cursor = _client.query( ns, Query().sort( "7" ) );
- while ( cursor->more() ) {
- BSONObj o = cursor->next();
- verify( o.valid() );
- //cout << " foo " << o << endl;
- }
+class MultiNe : public ClientBase {
+public:
+ ~MultiNe() {
+ _client.dropCollection("unittests.querytests.Ne");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.Ne";
+ _client.insert(ns, fromjson("{a:[1,2]}"));
+ ASSERT(_client.findOne(ns, fromjson("{a:{$ne:1}}")).isEmpty());
+ BSONObj spec = fromjson("{a:{$ne:1,$ne:2}}");
+ ASSERT(_client.findOne(ns, spec).isEmpty());
+ }
+};
- }
- void run() {
- const char *ns = "unittests.querytests.DifferentNumbers";
- { BSONObjBuilder b; b.append( "7" , (int)4 ); _client.insert( ns , b.obj() ); }
- { BSONObjBuilder b; b.append( "7" , (long long)2 ); _client.insert( ns , b.obj() ); }
- { BSONObjBuilder b; b.appendNull( "7" ); _client.insert( ns , b.obj() ); }
- { BSONObjBuilder b; b.append( "7" , "b" ); _client.insert( ns , b.obj() ); }
- { BSONObjBuilder b; b.appendNull( "8" ); _client.insert( ns , b.obj() ); }
- { BSONObjBuilder b; b.append( "7" , (double)3.7 ); _client.insert( ns , b.obj() ); }
-
- t(ns);
- ASSERT_OK(dbtests::createIndex( &_txn, ns , BSON( "7" << 1 ) ));
- t(ns);
- }
- };
+class EmbeddedNe : public ClientBase {
+public:
+ ~EmbeddedNe() {
+ _client.dropCollection("unittests.querytests.NestedNe");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.NestedNe";
+ _client.insert(ns, fromjson("{a:[{b:1},{b:2}]}"));
+ ASSERT(_client.findOne(ns, fromjson("{'a.b':{$ne:1}}")).isEmpty());
+ }
+};
- class CollectionBase : public ClientBase {
- public:
+class EmbeddedNumericTypes : public ClientBase {
+public:
+ ~EmbeddedNumericTypes() {
+ _client.dropCollection("unittests.querytests.NumericEmbedded");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.NumericEmbedded";
+ _client.insert(ns, BSON("a" << BSON("b" << 1)));
+ ASSERT(!_client.findOne(ns, BSON("a" << BSON("b" << 1.0))).isEmpty());
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT(!_client.findOne(ns, BSON("a" << BSON("b" << 1.0))).isEmpty());
+ }
+};
- CollectionBase( string leaf ) {
- _ns = "unittests.querytests.";
- _ns += leaf;
- _client.dropCollection( ns() );
- }
+class AutoResetIndexCache : public ClientBase {
+public:
+ ~AutoResetIndexCache() {
+ _client.dropCollection("unittests.querytests.AutoResetIndexCache");
+ }
+ static const char* ns() {
+ return "unittests.querytests.AutoResetIndexCache";
+ }
+ static const char* idxNs() {
+ return "unittests.system.indexes";
+ }
+ void index() {
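+        // Expect the implicit _id index plus the index created on 'a'.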
+ ASSERT_EQUALS(2u, _client.getIndexSpecs(ns()).size());
+ }
+ void noIndex() {
+ ASSERT_EQUALS(0u, _client.getIndexSpecs(ns()).size());
+ }
+ void checkIndex() {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1)));
+ index();
+ }
+ void run() {
+ _client.dropDatabase("unittests");
+ noIndex();
+ checkIndex();
+ _client.dropCollection(ns());
+ noIndex();
+ checkIndex();
+ _client.dropDatabase("unittests");
+ noIndex();
+ checkIndex();
+ }
+};
- virtual ~CollectionBase() {
- _client.dropCollection( ns() );
- }
+class UniqueIndex : public ClientBase {
+public:
+ ~UniqueIndex() {
+ _client.dropCollection("unittests.querytests.UniqueIndex");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.UniqueIndex";
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1), true));
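+        // The second insert duplicates 'a' under the unique index and is discarded.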
+ _client.insert(ns, BSON("a" << 4 << "b" << 2));
+ _client.insert(ns, BSON("a" << 4 << "b" << 3));
+ ASSERT_EQUALS(1U, _client.count(ns, BSONObj()));
+ _client.dropCollection(ns);
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("b" << 1), true));
+ _client.insert(ns, BSON("a" << 4 << "b" << 2));
+ _client.insert(ns, BSON("a" << 4 << "b" << 3));
+ ASSERT_EQUALS(2U, _client.count(ns, BSONObj()));
+ }
+};
- int count() {
- return (int) _client.count( ns() );
- }
+class UniqueIndexPreexistingData : public ClientBase {
+public:
+ ~UniqueIndexPreexistingData() {
+ _client.dropCollection("unittests.querytests.UniqueIndexPreexistingData");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.UniqueIndexPreexistingData";
+ _client.insert(ns, BSON("a" << 4 << "b" << 2));
+ _client.insert(ns, BSON("a" << 4 << "b" << 3));
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey,
+ dbtests::createIndex(&_txn, ns, BSON("a" << 1), true));
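+        // The failed build must not leave a partial 'a' index entry behind.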
+ ASSERT_EQUALS(
+ 0U,
+ _client.count("unittests.system.indexes", BSON("ns" << ns << "name" << NE << "_id_")));
+ }
+};
- size_t numCursorsOpen() {
- AutoGetCollectionForRead ctx(&_txn, _ns);
- Collection* collection = ctx.getCollection();
- if ( !collection )
- return 0;
- return collection->getCursorManager()->numCursors();
- }
+class SubobjectInArray : public ClientBase {
+public:
+ ~SubobjectInArray() {
+ _client.dropCollection("unittests.querytests.SubobjectInArray");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.SubobjectInArray";
+ _client.insert(ns, fromjson("{a:[{b:{c:1}}]}"));
+ ASSERT(!_client.findOne(ns, BSON("a.b.c" << 1)).isEmpty());
+ ASSERT(!_client.findOne(ns, fromjson("{'a.c':null}")).isEmpty());
+ }
+};
- const char * ns() {
- return _ns.c_str();
- }
+class Size : public ClientBase {
+public:
+ ~Size() {
+ _client.dropCollection("unittests.querytests.Size");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.Size";
+ _client.insert(ns, fromjson("{a:[1,2,3]}"));
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT(_client.query(ns, QUERY("a" << mongo::BSIZE << 3).hint(BSON("a" << 1)))->more());
+ }
+};
- private:
- string _ns;
- };
-
- class SymbolStringSame : public CollectionBase {
- public:
- SymbolStringSame() : CollectionBase( "symbolstringsame" ) {}
-
- void run() {
- { BSONObjBuilder b; b.appendSymbol( "x" , "eliot" ); b.append( "z" , 17 ); _client.insert( ns() , b.obj() ); }
- ASSERT_EQUALS( 17 , _client.findOne( ns() , BSONObj() )["z"].number() );
- {
- BSONObjBuilder b;
- b.appendSymbol( "x" , "eliot" );
- ASSERT_EQUALS( 17 , _client.findOne( ns() , b.obj() )["z"].number() );
- }
- ASSERT_EQUALS( 17 , _client.findOne( ns() , BSON( "x" << "eliot" ) )["z"].number() );
- ASSERT_OK(dbtests::createIndex( &_txn, ns() , BSON( "x" << 1 ) ));
- ASSERT_EQUALS( 17 , _client.findOne( ns() , BSON( "x" << "eliot" ) )["z"].number() );
- }
- };
+class FullArray : public ClientBase {
+public:
+ ~FullArray() {
+ _client.dropCollection("unittests.querytests.IndexedArray");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.IndexedArray";
+ _client.insert(ns, fromjson("{a:[1,2,3]}"));
+ ASSERT(_client.query(ns, Query("{a:[1,2,3]}"))->more());
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT(_client.query(ns, Query("{a:{$in:[1,[1,2,3]]}}").hint(BSON("a" << 1)))->more());
+ ASSERT(_client.query(ns, Query("{a:[1,2,3]}").hint(BSON("a" << 1)))->more()); // SERVER-146
+ }
+};
- class TailableCappedRaceCondition : public CollectionBase {
- public:
+class InsideArray : public ClientBase {
+public:
+ ~InsideArray() {
+ _client.dropCollection("unittests.querytests.InsideArray");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.InsideArray";
+ _client.insert(ns, fromjson("{a:[[1],2]}"));
+ check("$natural");
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ check("a"); // SERVER-146
+ }
- TailableCappedRaceCondition() : CollectionBase( "tailablecappedrace" ) {
- _client.dropCollection( ns() );
- _n = 0;
- }
- void run() {
- string err;
- Client::WriteContext ctx(&_txn, ns());
+private:
+ void check(const string& hintField) {
+ const char* ns = "unittests.querytests.InsideArray";
+ ASSERT(_client.query(ns, Query("{a:[[1],2]}").hint(BSON(hintField << 1)))->more());
+ ASSERT(_client.query(ns, Query("{a:[1]}").hint(BSON(hintField << 1)))->more());
+ ASSERT(_client.query(ns, Query("{a:2}").hint(BSON(hintField << 1)))->more());
+ ASSERT(!_client.query(ns, Query("{a:1}").hint(BSON(hintField << 1)))->more());
+ }
+};
- // note that extents are always at least 4KB now - so this will get rounded up
- // a bit.
- {
- WriteUnitOfWork wunit(&_txn);
- ASSERT( userCreateNS(&_txn, ctx.db(), ns(),
- fromjson( "{ capped : true, size : 2000, max: 10000 }" ), false ).isOK() );
- wunit.commit();
- }
+class IndexInsideArrayCorrect : public ClientBase {
+public:
+ ~IndexInsideArrayCorrect() {
+ _client.dropCollection("unittests.querytests.IndexInsideArrayCorrect");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.IndexInsideArrayCorrect";
+ _client.insert(ns, fromjson("{'_id':1,a:[1]}"));
+ _client.insert(ns, fromjson("{'_id':2,a:[[1]]}"));
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT_EQUALS(
+ 1, _client.query(ns, Query("{a:[1]}").hint(BSON("a" << 1)))->next().getIntField("_id"));
+ }
+};
- for (int i = 0; i < 200; i++) {
- insertNext();
- ASSERT(count() < 90);
- }
+class SubobjArr : public ClientBase {
+public:
+ ~SubobjArr() {
+ _client.dropCollection("unittests.querytests.SubobjArr");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.SubobjArr";
+ _client.insert(ns, fromjson("{a:[{b:[1]}]}"));
+ check("$natural");
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ check("a");
+ }
- int a = count();
+private:
+ void check(const string& hintField) {
+ const char* ns = "unittests.querytests.SubobjArr";
+ ASSERT(_client.query(ns, Query("{'a.b':1}").hint(BSON(hintField << 1)))->more());
+ ASSERT(_client.query(ns, Query("{'a.b':[1]}").hint(BSON(hintField << 1)))->more());
+ }
+};
- auto_ptr< DBClientCursor > c = _client.query( ns() , QUERY( "i" << GT << 0 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_CursorTailable );
- int n=0;
- while ( c->more() ) {
- BSONObj z = c->next();
- n++;
- }
+class MinMax : public ClientBase {
+public:
+ MinMax() : ns("unittests.querytests.MinMax") {}
+ ~MinMax() {
+ _client.dropCollection("unittests.querytests.MinMax");
+ }
+ void run() {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1 << "b" << 1)));
+ _client.insert(ns, BSON("a" << 1 << "b" << 1));
+ _client.insert(ns, BSON("a" << 1 << "b" << 2));
+ _client.insert(ns, BSON("a" << 2 << "b" << 1));
+ _client.insert(ns, BSON("a" << 2 << "b" << 2));
+
+ ASSERT_EQUALS(4, count(_client.query(ns, BSONObj())));
+ BSONObj hints[] = {BSONObj(), BSON("a" << 1 << "b" << 1)};
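+        // $min bounds are inclusive and $max bounds are exclusive in these checks.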
+ for (int i = 0; i < 2; ++i) {
+ check(0, 0, 3, 3, 4, hints[i]);
+ check(1, 1, 2, 2, 3, hints[i]);
+ check(1, 2, 2, 2, 2, hints[i]);
+ check(1, 2, 2, 1, 1, hints[i]);
+
+ auto_ptr<DBClientCursor> c = query(1, 2, 2, 2, hints[i]);
+ BSONObj obj = c->next();
+ ASSERT_EQUALS(1, obj.getIntField("a"));
+ ASSERT_EQUALS(2, obj.getIntField("b"));
+ obj = c->next();
+ ASSERT_EQUALS(2, obj.getIntField("a"));
+ ASSERT_EQUALS(1, obj.getIntField("b"));
+ ASSERT(!c->more());
+ }
+ }
- ASSERT_EQUALS( a , n );
+private:
+ auto_ptr<DBClientCursor> query(int minA, int minB, int maxA, int maxB, const BSONObj& hint) {
+ Query q;
+ q = q.minKey(BSON("a" << minA << "b" << minB)).maxKey(BSON("a" << maxA << "b" << maxB));
+ if (!hint.isEmpty())
+ q.hint(hint);
+ return _client.query(ns, q);
+ }
+ void check(
+ int minA, int minB, int maxA, int maxB, int expectedCount, const BSONObj& hint = empty_) {
+ ASSERT_EQUALS(expectedCount, count(query(minA, minB, maxA, maxB, hint)));
+ }
+ int count(auto_ptr<DBClientCursor> c) {
+ int ret = 0;
+ while (c->more()) {
+ ++ret;
+ c->next();
+ }
+ return ret;
+ }
+ const char* ns;
+ static BSONObj empty_;
+};
+BSONObj MinMax::empty_;
+
+class MatchCodeCodeWScope : public ClientBase {
+public:
+ MatchCodeCodeWScope() : _ns("unittests.querytests.MatchCodeCodeWScope") {}
+ ~MatchCodeCodeWScope() {
+ _client.dropCollection("unittests.querytests.MatchCodeCodeWScope");
+ }
+ void run() {
+ checkMatch();
+ ASSERT_OK(dbtests::createIndex(&_txn, _ns, BSON("a" << 1)));
+ checkMatch();
+ }
- insertNext();
- ASSERT( c->more() );
+private:
+ void checkMatch() {
+ _client.remove(_ns, BSONObj());
- for ( int i=0; i<90; i++ ) {
- insertNext();
- }
+ _client.insert(_ns, code());
+ _client.insert(_ns, codeWScope());
- while ( c->more() ) { c->next(); }
- }
+ ASSERT_EQUALS(1U, _client.count(_ns, code()));
+ ASSERT_EQUALS(1U, _client.count(_ns, codeWScope()));
- void insertNext() {
- BSONObjBuilder b;
- b.appendOID("_id", 0, true);
- b.append("i", _n++);
- insert( ns() , b.obj() );
- }
+ ASSERT_EQUALS(1U, _client.count(_ns, BSON("a" << BSON("$type" << (int)Code))));
+ ASSERT_EQUALS(1U, _client.count(_ns, BSON("a" << BSON("$type" << (int)CodeWScope))));
+ }
+ BSONObj code() const {
+ BSONObjBuilder codeBuilder;
+ codeBuilder.appendCode("a", "return 1;");
+ return codeBuilder.obj();
+ }
+ BSONObj codeWScope() const {
+ BSONObjBuilder codeWScopeBuilder;
+ codeWScopeBuilder.appendCodeWScope("a", "return 1;", BSONObj());
+ return codeWScopeBuilder.obj();
+ }
+ const char* _ns;
+};
+
+class MatchDBRefType : public ClientBase {
+public:
+ MatchDBRefType() : _ns("unittests.querytests.MatchDBRefType") {}
+ ~MatchDBRefType() {
+ _client.dropCollection("unittests.querytests.MatchDBRefType");
+ }
+ void run() {
+ checkMatch();
+ ASSERT_OK(dbtests::createIndex(&_txn, _ns, BSON("a" << 1)));
+ checkMatch();
+ }
- int _n;
- };
+private:
+ void checkMatch() {
+ _client.remove(_ns, BSONObj());
+ _client.insert(_ns, dbref());
+ ASSERT_EQUALS(1U, _client.count(_ns, dbref()));
+ ASSERT_EQUALS(1U, _client.count(_ns, BSON("a" << BSON("$type" << (int)DBRef))));
+ }
+ BSONObj dbref() const {
+ BSONObjBuilder b;
+ OID oid;
+ b.appendDBRef("a", "ns", oid);
+ return b.obj();
+ }
+ const char* _ns;
+};
+
+class DirectLocking : public ClientBase {
+public:
+ void run() {
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::GlobalWrite lk(_txn.lockState());
+ Client::Context ctx(&_txn, "unittests.DirectLocking");
+ _client.remove("a.b", BSONObj());
+ ASSERT_EQUALS("unittests", ctx.db()->name());
+ }
+ const char* ns;
+};
- class HelperTest : public CollectionBase {
- public:
+class FastCountIn : public ClientBase {
+public:
+ ~FastCountIn() {
+ _client.dropCollection("unittests.querytests.FastCountIn");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.FastCountIn";
+ _client.insert(ns,
+ BSON("i"
+ << "a"));
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("i" << 1)));
+ ASSERT_EQUALS(1U, _client.count(ns, fromjson("{i:{$in:['a']}}")));
+ }
+};
- HelperTest() : CollectionBase( "helpertest" ) {
+class EmbeddedArray : public ClientBase {
+public:
+ ~EmbeddedArray() {
+ _client.dropCollection("unittests.querytests.EmbeddedArray");
+ }
+ void run() {
+ const char* ns = "unittests.querytests.EmbeddedArray";
+ _client.insert(ns, fromjson("{foo:{bar:['spam']}}"));
+ _client.insert(ns, fromjson("{foo:{bar:['spam','eggs']}}"));
+ _client.insert(ns, fromjson("{bar:['spam']}"));
+ _client.insert(ns, fromjson("{bar:['spam','eggs']}"));
+ ASSERT_EQUALS(2U,
+ _client.count(ns,
+ BSON("bar"
+ << "spam")));
+ ASSERT_EQUALS(2U,
+ _client.count(ns,
+ BSON("foo.bar"
+ << "spam")));
+ }
+};
+
+class DifferentNumbers : public ClientBase {
+public:
+ ~DifferentNumbers() {
+ _client.dropCollection("unittests.querytests.DifferentNumbers");
+ }
+ void t(const char* ns) {
+ auto_ptr<DBClientCursor> cursor = _client.query(ns, Query().sort("7"));
+ while (cursor->more()) {
+ BSONObj o = cursor->next();
+ verify(o.valid());
+ // cout << " foo " << o << endl;
+ }
+ }
+ void run() {
+ const char* ns = "unittests.querytests.DifferentNumbers";
+ {
+ BSONObjBuilder b;
+ b.append("7", (int)4);
+ _client.insert(ns, b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.append("7", (long long)2);
+ _client.insert(ns, b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.appendNull("7");
+ _client.insert(ns, b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.append("7", "b");
+ _client.insert(ns, b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.appendNull("8");
+ _client.insert(ns, b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.append("7", (double)3.7);
+ _client.insert(ns, b.obj());
}
- void run() {
- Client::WriteContext ctx(&_txn, ns());
+ t(ns);
+ ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("7" << 1)));
+ t(ns);
+ }
+};
+
+class CollectionBase : public ClientBase {
+public:
+ CollectionBase(string leaf) {
+ _ns = "unittests.querytests.";
+ _ns += leaf;
+ _client.dropCollection(ns());
+ }
- for ( int i=0; i<50; i++ ) {
- insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
- }
+ virtual ~CollectionBase() {
+ _client.dropCollection(ns());
+ }
- ASSERT_EQUALS( 50 , count() );
+ int count() {
+ return (int)_client.count(ns());
+ }
- BSONObj res;
- ASSERT( Helpers::findOne(&_txn, ctx.getCollection(),
- BSON("_id" << 20) , res , true));
- ASSERT_EQUALS( 40 , res["x"].numberInt() );
+ size_t numCursorsOpen() {
+ AutoGetCollectionForRead ctx(&_txn, _ns);
+ Collection* collection = ctx.getCollection();
+ if (!collection)
+ return 0;
+ return collection->getCursorManager()->numCursors();
+ }
- ASSERT( Helpers::findById( &_txn, ctx.ctx().db(), ns() , BSON( "_id" << 20 ) , res ) );
- ASSERT_EQUALS( 40 , res["x"].numberInt() );
+ const char* ns() {
+ return _ns.c_str();
+ }
- ASSERT( ! Helpers::findById( &_txn, ctx.ctx().db(), ns() , BSON( "_id" << 200 ) , res ) );
+private:
+ string _ns;
+};
- long long slow;
- long long fast;
+class SymbolStringSame : public CollectionBase {
+public:
+ SymbolStringSame() : CollectionBase("symbolstringsame") {}
- int n = 10000;
- DEV n = 1000;
- {
- Timer t;
- for ( int i=0; i<n; i++ ) {
- ASSERT( Helpers::findOne(&_txn, ctx.getCollection(),
- BSON( "_id" << 20 ), res, true ) );
- }
- slow = t.micros();
- }
- {
- Timer t;
- for ( int i=0; i<n; i++ ) {
- ASSERT( Helpers::findById(&_txn, ctx.db(), ns() , BSON( "_id" << 20 ) , res ) );
- }
- fast = t.micros();
- }
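+    // A Symbol value and a String value with the same text should match each
+    // other, both before and after 'x' is indexed.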
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.appendSymbol("x", "eliot");
+ b.append("z", 17);
+ _client.insert(ns(), b.obj());
+ }
+ ASSERT_EQUALS(17, _client.findOne(ns(), BSONObj())["z"].number());
+ {
+ BSONObjBuilder b;
+ b.appendSymbol("x", "eliot");
+ ASSERT_EQUALS(17, _client.findOne(ns(), b.obj())["z"].number());
+ }
+ ASSERT_EQUALS(17,
+ _client.findOne(ns(),
+ BSON("x"
+ << "eliot"))["z"].number());
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("x" << 1)));
+ ASSERT_EQUALS(17,
+ _client.findOne(ns(),
+ BSON("x"
+ << "eliot"))["z"].number());
+ }
+};
- cout << "HelperTest slow:" << slow << " fast:" << fast << endl;
+class TailableCappedRaceCondition : public CollectionBase {
+public:
+ TailableCappedRaceCondition() : CollectionBase("tailablecappedrace") {
+ _client.dropCollection(ns());
+ _n = 0;
+ }
+ void run() {
+ string err;
+ Client::WriteContext ctx(&_txn, ns());
+ // note that extents are always at least 4KB now - so this will get rounded up
+ // a bit.
+ {
+ WriteUnitOfWork wunit(&_txn);
+ ASSERT(userCreateNS(&_txn,
+ ctx.db(),
+ ns(),
+ fromjson("{ capped : true, size : 2000, max: 10000 }"),
+ false).isOK());
+ wunit.commit();
}
- };
- class HelperByIdTest : public CollectionBase {
- public:
-
- HelperByIdTest() : CollectionBase( "helpertestbyid" ) {
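+        // The small capped collection wraps long before 200 inserts, so the
+        // document count stays under 90.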
+ for (int i = 0; i < 200; i++) {
+ insertNext();
+ ASSERT(count() < 90);
}
- void run() {
- Client::WriteContext ctx(&_txn, ns());
+ int a = count();
- for ( int i=0; i<1000; i++ ) {
- insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
- }
- for ( int i=0; i<1000; i+=2 ) {
- _client.remove( ns() , BSON( "_id" << i ) );
- }
+ auto_ptr<DBClientCursor> c =
+ _client.query(ns(),
+ QUERY("i" << GT << 0).hint(BSON("$natural" << 1)),
+ 0,
+ 0,
+ 0,
+ QueryOption_CursorTailable);
+ int n = 0;
+ while (c->more()) {
+ BSONObj z = c->next();
+ n++;
+ }
- BSONObj res;
- for ( int i=0; i<1000; i++ ) {
- bool found = Helpers::findById( &_txn, ctx.db(), ns() , BSON( "_id" << i ) , res );
- ASSERT_EQUALS( i % 2 , int(found) );
- }
+ ASSERT_EQUALS(a, n);
+
+ insertNext();
+ ASSERT(c->more());
+ for (int i = 0; i < 90; i++) {
+ insertNext();
}
- };
- class ClientCursorTest : public CollectionBase {
- ClientCursorTest() : CollectionBase( "clientcursortest" ) {
+ while (c->more()) {
+ c->next();
}
+ }
- void run() {
- Client::WriteContext ctx(&_txn, ns());
+ void insertNext() {
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ b.append("i", _n++);
+ insert(ns(), b.obj());
+ }
- for ( int i=0; i<1000; i++ ) {
- insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
- }
+ int _n;
+};
- }
- };
+class HelperTest : public CollectionBase {
+public:
+ HelperTest() : CollectionBase("helpertest") {}
+
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
- class FindingStart : public CollectionBase {
- public:
- FindingStart() : CollectionBase( "findingstart" ) {
+ for (int i = 0; i < 50; i++) {
+ insert(ns(), BSON("_id" << i << "x" << i * 2));
}
- void run() {
-            cout << "FindingStart" << endl;
- BSONObj info;
- ASSERT( _client.runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+ ASSERT_EQUALS(50, count());
- int i = 0;
- int max = 1;
+ BSONObj res;
+ ASSERT(Helpers::findOne(&_txn, ctx.getCollection(), BSON("_id" << 20), res, true));
+ ASSERT_EQUALS(40, res["x"].numberInt());
- while ( 1 ) {
- int oldCount = count();
- _client.insert( ns(), BSON( "ts" << i++ ) );
- int newCount = count();
- if ( oldCount == newCount ||
- newCount < max )
- break;
+ ASSERT(Helpers::findById(&_txn, ctx.ctx().db(), ns(), BSON("_id" << 20), res));
+ ASSERT_EQUALS(40, res["x"].numberInt());
- if ( newCount > max )
- max = newCount;
- }
+ ASSERT(!Helpers::findById(&_txn, ctx.ctx().db(), ns(), BSON("_id" << 200), res));
+
+ long long slow;
+ long long fast;
- for( int k = 0; k < 5; ++k ) {
- _client.insert( ns(), BSON( "ts" << i++ ) );
- int min = _client.query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
- for( int j = -1; j < i; ++j ) {
- auto_ptr< DBClientCursor > c = _client.query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
- ASSERT( c->more() );
- BSONObj next = c->next();
- ASSERT( !next[ "ts" ].eoo() );
- ASSERT_EQUALS( ( j > min ? j : min ), next[ "ts" ].numberInt() );
- }
- cout << k << endl;
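+        // Time n lookups through the generic findOne path against the findById
+        // fast path.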
+ int n = 10000;
+ DEV n = 1000;
+ {
+ Timer t;
+ for (int i = 0; i < n; i++) {
+ ASSERT(Helpers::findOne(&_txn, ctx.getCollection(), BSON("_id" << 20), res, true));
}
+ slow = t.micros();
}
- };
-
- class FindingStartPartiallyFull : public CollectionBase {
- public:
- FindingStartPartiallyFull() : CollectionBase( "findingstart" ) {
+ {
+ Timer t;
+ for (int i = 0; i < n; i++) {
+ ASSERT(Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << 20), res));
+ }
+ fast = t.micros();
}
- void run() {
-            cout << "FindingStartPartiallyFull" << endl;
- size_t startNumCursors = numCursorsOpen();
-
- BSONObj info;
- ASSERT( _client.runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
-
- int i = 0;
- for( ; i < 150; _client.insert( ns(), BSON( "ts" << i++ ) ) );
-
- for( int k = 0; k < 5; ++k ) {
- _client.insert( ns(), BSON( "ts" << i++ ) );
- int min = _client.query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
- for( int j = -1; j < i; ++j ) {
- auto_ptr< DBClientCursor > c = _client.query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
- ASSERT( c->more() );
- BSONObj next = c->next();
- ASSERT( !next[ "ts" ].eoo() );
- ASSERT_EQUALS( ( j > min ? j : min ), next[ "ts" ].numberInt() );
- }
- cout << k << endl;
- }
+ cout << "HelperTest slow:" << slow << " fast:" << fast << endl;
+ }
+};
- ASSERT_EQUALS( startNumCursors, numCursorsOpen() );
- }
- };
-
- /**
- * Check OplogReplay mode where query timestamp is earlier than the earliest
- * entry in the collection.
- */
- class FindingStartStale : public CollectionBase {
- public:
- FindingStartStale() : CollectionBase( "findingstart" ) {}
-
- void run() {
-            cout << "FindingStartStale" << endl;
- size_t startNumCursors = numCursorsOpen();
-
- // Check OplogReplay mode with missing collection.
- auto_ptr< DBClientCursor > c0 = _client.query( ns(), QUERY( "ts" << GTE << 50 ), 0, 0, 0, QueryOption_OplogReplay );
- ASSERT( !c0->more() );
-
- BSONObj info;
- ASSERT( _client.runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
-
- // Check OplogReplay mode with empty collection.
- auto_ptr< DBClientCursor > c = _client.query( ns(), QUERY( "ts" << GTE << 50 ), 0, 0, 0, QueryOption_OplogReplay );
- ASSERT( !c->more() );
-
- // Check with some docs in the collection.
- for( int i = 100; i < 150; _client.insert( ns(), BSON( "ts" << i++ ) ) );
- c = _client.query( ns(), QUERY( "ts" << GTE << 50 ), 0, 0, 0, QueryOption_OplogReplay );
- ASSERT( c->more() );
- ASSERT_EQUALS( 100, c->next()[ "ts" ].numberInt() );
-
- // Check that no persistent cursors outlast our queries above.
- ASSERT_EQUALS( startNumCursors, numCursorsOpen() );
+class HelperByIdTest : public CollectionBase {
+public:
+ HelperByIdTest() : CollectionBase("helpertestbyid") {}
+
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ for (int i = 0; i < 1000; i++) {
+ insert(ns(), BSON("_id" << i << "x" << i * 2));
}
- };
-
- class WhatsMyUri : public CollectionBase {
- public:
- WhatsMyUri() : CollectionBase( "whatsmyuri" ) {}
- void run() {
- BSONObj result;
- _client.runCommand( "admin", BSON( "whatsmyuri" << 1 ), result );
- ASSERT_EQUALS( unknownAddress.toString(), result[ "you" ].str() );
+ for (int i = 0; i < 1000; i += 2) {
+ _client.remove(ns(), BSON("_id" << i));
}
- };
-
- class CollectionInternalBase : public CollectionBase {
- public:
- CollectionInternalBase( const char *nsLeaf )
- : CollectionBase( nsLeaf ),
- _scopedXact(&_txn, MODE_IX),
- _lk(_txn.lockState(), "unittests", MODE_X),
- _ctx(&_txn, ns()) {
+ BSONObj res;
+ for (int i = 0; i < 1000; i++) {
+ bool found = Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << i), res);
+ ASSERT_EQUALS(i % 2, int(found));
}
+ }
+};
- private:
- ScopedTransaction _scopedXact;
- Lock::DBLock _lk;
- Client::Context _ctx;
- };
-
- class Exhaust : public CollectionInternalBase {
- public:
- Exhaust() : CollectionInternalBase( "exhaust" ) {}
- void run() {
- BSONObj info;
- ASSERT( _client.runCommand( "unittests",
- BSON( "create" << "querytests.exhaust" <<
- "capped" << true << "size" << 8192 ), info ) );
- _client.insert( ns(), BSON( "ts" << 0 ) );
- Message message;
- assembleRequest( ns(), BSON( "ts" << GTE << 0 ), 0, 0, 0,
- QueryOption_OplogReplay | QueryOption_CursorTailable |
- QueryOption_Exhaust,
- message );
- DbMessage dbMessage( message );
- QueryMessage queryMessage( dbMessage );
- Message result;
- string exhaust = runQuery( &_txn, message, queryMessage, NamespaceString(ns()),
- *cc().curop(), result );
- ASSERT( exhaust.size() );
- ASSERT_EQUALS( string( ns() ), exhaust );
- }
- };
-
- class QueryCursorTimeout : public CollectionInternalBase {
- public:
- QueryCursorTimeout() : CollectionInternalBase( "querycursortimeout" ) {}
- void run() {
- for( int i = 0; i < 150; ++i ) {
- insert( ns(), BSONObj() );
- }
- auto_ptr<DBClientCursor> c = _client.query( ns(), Query() );
- ASSERT( c->more() );
- long long cursorId = c->getCursorId();
-
- ClientCursor *clientCursor = 0;
- {
- AutoGetCollectionForRead ctx(&_txn, ns());
- ClientCursorPin clientCursorPointer(ctx.getCollection()->getCursorManager(),
- cursorId);
- clientCursor = clientCursorPointer.c();
- // clientCursorPointer destructor unpins the cursor.
- }
- ASSERT( clientCursor->shouldTimeout( 600001 ) );
+class ClientCursorTest : public CollectionBase {
+ ClientCursorTest() : CollectionBase("clientcursortest") {}
+
+ void run() {
+ Client::WriteContext ctx(&_txn, ns());
+
+ for (int i = 0; i < 1000; i++) {
+ insert(ns(), BSON("_id" << i << "x" << i * 2));
}
- };
-
- class QueryReadsAll : public CollectionBase {
- public:
- QueryReadsAll() : CollectionBase( "queryreadsall" ) {}
- void run() {
- for( int i = 0; i < 5; ++i ) {
- insert( ns(), BSONObj() );
+ }
+};
+
+class FindingStart : public CollectionBase {
+public:
+ FindingStart() : CollectionBase("findingstart") {}
+
+ void run() {
+        cout << "FindingStart" << endl;
+ BSONObj info;
+ ASSERT(_client.runCommand("unittests",
+ BSON("create"
+ << "querytests.findingstart"
+ << "capped" << true << "$nExtents" << 5 << "autoIndexId"
+ << false),
+ info));
+
+ int i = 0;
+ int max = 1;
+
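+        // Fill the capped collection until it wraps and the count stops growing.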
+ while (1) {
+ int oldCount = count();
+ _client.insert(ns(), BSON("ts" << i++));
+ int newCount = count();
+ if (oldCount == newCount || newCount < max)
+ break;
+
+ if (newCount > max)
+ max = newCount;
+ }
+
+ for (int k = 0; k < 5; ++k) {
+ _client.insert(ns(), BSON("ts" << i++));
+ int min =
+ _client.query(ns(), Query().sort(BSON("$natural" << 1)))->next()["ts"].numberInt();
+ for (int j = -1; j < i; ++j) {
+ auto_ptr<DBClientCursor> c =
+ _client.query(ns(), QUERY("ts" << GTE << j), 0, 0, 0, QueryOption_OplogReplay);
+ ASSERT(c->more());
+ BSONObj next = c->next();
+ ASSERT(!next["ts"].eoo());
+ ASSERT_EQUALS((j > min ? j : min), next["ts"].numberInt());
}
- auto_ptr<DBClientCursor> c = _client.query( ns(), Query(), 5 );
- ASSERT( c->more() );
- // With five results and a batch size of 5, no cursor is created.
- ASSERT_EQUALS( 0, c->getCursorId() );
- }
- };
-
- /**
- * Check that an attempt to kill a pinned cursor fails and produces an appropriate assertion.
- */
- class KillPinnedCursor : public CollectionBase {
- public:
- KillPinnedCursor() : CollectionBase( "killpinnedcursor" ) {
+ cout << k << endl;
}
- void run() {
- _client.insert( ns(), vector<BSONObj>( 3, BSONObj() ) );
- auto_ptr<DBClientCursor> cursor = _client.query( ns(), BSONObj(), 0, 0, 0, 0, 2 );
- ASSERT_EQUALS( 2, cursor->objsLeftInBatch() );
- long long cursorId = cursor->getCursorId();
-
- {
- Client::WriteContext ctx(&_txn, ns() );
- ClientCursorPin pinCursor( ctx.ctx().db()->getCollection( ns() )
- ->getCursorManager(),
- cursorId );
- string expectedAssertion =
- str::stream() << "Cannot kill active cursor " << cursorId;
- ASSERT_THROWS_WHAT(CursorManager::eraseCursorGlobal(&_txn, cursorId),
- MsgAssertionException, expectedAssertion);
+ }
+};
+
+class FindingStartPartiallyFull : public CollectionBase {
+public:
+ FindingStartPartiallyFull() : CollectionBase("findingstart") {}
+
+ void run() {
+        cout << "FindingStartPartiallyFull" << endl;
+ size_t startNumCursors = numCursorsOpen();
+
+ BSONObj info;
+ ASSERT(_client.runCommand("unittests",
+ BSON("create"
+ << "querytests.findingstart"
+ << "capped" << true << "$nExtents" << 5 << "autoIndexId"
+ << false),
+ info));
+
+ int i = 0;
+ for (; i < 150; _client.insert(ns(), BSON("ts" << i++)))
+ ;
+
+ for (int k = 0; k < 5; ++k) {
+ _client.insert(ns(), BSON("ts" << i++));
+ int min =
+ _client.query(ns(), Query().sort(BSON("$natural" << 1)))->next()["ts"].numberInt();
+ for (int j = -1; j < i; ++j) {
+ auto_ptr<DBClientCursor> c =
+ _client.query(ns(), QUERY("ts" << GTE << j), 0, 0, 0, QueryOption_OplogReplay);
+ ASSERT(c->more());
+ BSONObj next = c->next();
+ ASSERT(!next["ts"].eoo());
+ ASSERT_EQUALS((j > min ? j : min), next["ts"].numberInt());
}
-
- // Verify that the remaining document is read from the cursor.
- ASSERT_EQUALS( 3, cursor->itcount() );
+ cout << k << endl;
}
- };
-
- namespace queryobjecttests {
- class names1 {
- public:
- void run() {
- ASSERT_EQUALS( BSON( "x" << 1 ) , QUERY( "query" << BSON( "x" << 1 ) ).getFilter() );
- ASSERT_EQUALS( BSON( "x" << 1 ) , QUERY( "$query" << BSON( "x" << 1 ) ).getFilter() );
- }
- };
+ ASSERT_EQUALS(startNumCursors, numCursorsOpen());
}
+};
- class OrderingTest {
- public:
- void run() {
- {
- Ordering o = Ordering::make( BSON( "a" << 1 << "b" << -1 << "c" << 1 ) );
- ASSERT_EQUALS( 1 , o.get(0) );
- ASSERT_EQUALS( -1 , o.get(1) );
- ASSERT_EQUALS( 1 , o.get(2) );
+/**
+ * Check OplogReplay mode where query timestamp is earlier than the earliest
+ * entry in the collection.
+ */
+class FindingStartStale : public CollectionBase {
+public:
+ FindingStartStale() : CollectionBase("findingstart") {}
+
+ void run() {
+        cout << "FindingStartStale" << endl;
+ size_t startNumCursors = numCursorsOpen();
+
+ // Check OplogReplay mode with missing collection.
+ auto_ptr<DBClientCursor> c0 =
+ _client.query(ns(), QUERY("ts" << GTE << 50), 0, 0, 0, QueryOption_OplogReplay);
+ ASSERT(!c0->more());
+
+ BSONObj info;
+ ASSERT(_client.runCommand("unittests",
+ BSON("create"
+ << "querytests.findingstart"
+ << "capped" << true << "$nExtents" << 5 << "autoIndexId"
+ << false),
+ info));
+
+ // Check OplogReplay mode with empty collection.
+ auto_ptr<DBClientCursor> c =
+ _client.query(ns(), QUERY("ts" << GTE << 50), 0, 0, 0, QueryOption_OplogReplay);
+ ASSERT(!c->more());
+
+ // Check with some docs in the collection.
+ for (int i = 100; i < 150; _client.insert(ns(), BSON("ts" << i++)))
+ ;
+ c = _client.query(ns(), QUERY("ts" << GTE << 50), 0, 0, 0, QueryOption_OplogReplay);
+ ASSERT(c->more());
+ ASSERT_EQUALS(100, c->next()["ts"].numberInt());
+
+ // Check that no persistent cursors outlast our queries above.
+ ASSERT_EQUALS(startNumCursors, numCursorsOpen());
+ }
+};
+
+class WhatsMyUri : public CollectionBase {
+public:
+ WhatsMyUri() : CollectionBase("whatsmyuri") {}
+ void run() {
+ BSONObj result;
+ _client.runCommand("admin", BSON("whatsmyuri" << 1), result);
+ ASSERT_EQUALS(unknownAddress.toString(), result["you"].str());
+ }
+};
+
+class CollectionInternalBase : public CollectionBase {
+public:
+ CollectionInternalBase(const char* nsLeaf)
+ : CollectionBase(nsLeaf),
+ _scopedXact(&_txn, MODE_IX),
+ _lk(_txn.lockState(), "unittests", MODE_X),
+ _ctx(&_txn, ns()) {}
+
+private:
+ ScopedTransaction _scopedXact;
+ Lock::DBLock _lk;
+ Client::Context _ctx;
+};
+
+class Exhaust : public CollectionInternalBase {
+public:
+ Exhaust() : CollectionInternalBase("exhaust") {}
+ void run() {
+ BSONObj info;
+ ASSERT(_client.runCommand("unittests",
+ BSON("create"
+ << "querytests.exhaust"
+ << "capped" << true << "size" << 8192),
+ info));
+ _client.insert(ns(), BSON("ts" << 0));
+ Message message;
+ assembleRequest(ns(),
+ BSON("ts" << GTE << 0),
+ 0,
+ 0,
+ 0,
+ QueryOption_OplogReplay | QueryOption_CursorTailable | QueryOption_Exhaust,
+ message);
+ DbMessage dbMessage(message);
+ QueryMessage queryMessage(dbMessage);
+ Message result;
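+        // runQuery reports the namespace when the exhaust option takes effect.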
+ string exhaust =
+ runQuery(&_txn, message, queryMessage, NamespaceString(ns()), *cc().curop(), result);
+ ASSERT(exhaust.size());
+ ASSERT_EQUALS(string(ns()), exhaust);
+ }
+};
+
+class QueryCursorTimeout : public CollectionInternalBase {
+public:
+ QueryCursorTimeout() : CollectionInternalBase("querycursortimeout") {}
+ void run() {
+ for (int i = 0; i < 150; ++i) {
+ insert(ns(), BSONObj());
+ }
+ auto_ptr<DBClientCursor> c = _client.query(ns(), Query());
+ ASSERT(c->more());
+ long long cursorId = c->getCursorId();
+
+ ClientCursor* clientCursor = 0;
+ {
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ ClientCursorPin clientCursorPointer(ctx.getCollection()->getCursorManager(), cursorId);
+ clientCursor = clientCursorPointer.c();
+ // clientCursorPointer destructor unpins the cursor.
+ }
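+        // Once unpinned, the cursor is eligible for the ten minute (600000 ms)
+        // idle timeout.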
+ ASSERT(clientCursor->shouldTimeout(600001));
+ }
+};
+
+class QueryReadsAll : public CollectionBase {
+public:
+ QueryReadsAll() : CollectionBase("queryreadsall") {}
+ void run() {
+ for (int i = 0; i < 5; ++i) {
+ insert(ns(), BSONObj());
+ }
+ auto_ptr<DBClientCursor> c = _client.query(ns(), Query(), 5);
+ ASSERT(c->more());
+ // With five results and a batch size of 5, no cursor is created.
+ ASSERT_EQUALS(0, c->getCursorId());
+ }
+};
- ASSERT( ! o.descending( 1 ) );
- ASSERT( o.descending( 1 << 1 ) );
- ASSERT( ! o.descending( 1 << 2 ) );
- }
+/**
+ * Check that an attempt to kill a pinned cursor fails and produces an appropriate assertion.
+ */
+class KillPinnedCursor : public CollectionBase {
+public:
+ KillPinnedCursor() : CollectionBase("killpinnedcursor") {}
+ void run() {
+ _client.insert(ns(), vector<BSONObj>(3, BSONObj()));
+ auto_ptr<DBClientCursor> cursor = _client.query(ns(), BSONObj(), 0, 0, 0, 0, 2);
+ ASSERT_EQUALS(2, cursor->objsLeftInBatch());
+ long long cursorId = cursor->getCursorId();
+
+ {
+ Client::WriteContext ctx(&_txn, ns());
+ ClientCursorPin pinCursor(ctx.ctx().db()->getCollection(ns())->getCursorManager(),
+ cursorId);
+ string expectedAssertion = str::stream() << "Cannot kill active cursor " << cursorId;
+ ASSERT_THROWS_WHAT(CursorManager::eraseCursorGlobal(&_txn, cursorId),
+ MsgAssertionException,
+ expectedAssertion);
+ }
- {
- Ordering o = Ordering::make( BSON( "a.d" << 1 << "a" << 1 << "e" << -1 ) );
- ASSERT_EQUALS( 1 , o.get(0) );
- ASSERT_EQUALS( 1 , o.get(1) );
- ASSERT_EQUALS( -1 , o.get(2) );
+ // Verify that the remaining document is read from the cursor.
+ ASSERT_EQUALS(3, cursor->itcount());
+ }
+};
+
+namespace queryobjecttests {
+class names1 {
+public:
+ void run() {
+ ASSERT_EQUALS(BSON("x" << 1), QUERY("query" << BSON("x" << 1)).getFilter());
+ ASSERT_EQUALS(BSON("x" << 1), QUERY("$query" << BSON("x" << 1)).getFilter());
+ }
+};
+} // namespace queryobjecttests
- ASSERT( ! o.descending( 1 ) );
- ASSERT( ! o.descending( 1 << 1 ) );
- ASSERT( o.descending( 1 << 2 ) );
- }
+class OrderingTest {
+public:
+ void run() {
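+        // descending() takes a bitmask: bit i is set when the i-th sort key is
+        // descending.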
+ {
+ Ordering o = Ordering::make(BSON("a" << 1 << "b" << -1 << "c" << 1));
+ ASSERT_EQUALS(1, o.get(0));
+ ASSERT_EQUALS(-1, o.get(1));
+ ASSERT_EQUALS(1, o.get(2));
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "query" ) {
+ ASSERT(!o.descending(1));
+ ASSERT(o.descending(1 << 1));
+ ASSERT(!o.descending(1 << 2));
}
- void setupTests() {
- add< FindingStart >();
- add< FindOneOr >();
- add< FindOneRequireIndex >();
- add< FindOneEmptyObj >();
- add< BoundedKey >();
- add< GetMore >();
- add< GetMoreKillOp >();
- add< GetMoreInvalidRequest >();
- add< PositiveLimit >();
- add< ReturnOneOfManyAndTail >();
- add< TailNotAtEnd >();
- add< EmptyTail >();
- add< TailableDelete >();
- add< TailableDelete2 >();
- add< TailableInsertDelete >();
- add< TailCappedOnly >();
- add< TailableQueryOnId >();
- add< OplogReplayMode >();
- add< OplogReplaySlaveReadTill >();
- add< OplogReplayExplain >();
- add< ArrayId >();
- add< UnderscoreNs >();
- add< EmptyFieldSpec >();
- add< MultiNe >();
- add< EmbeddedNe >();
- add< EmbeddedNumericTypes >();
- add< AutoResetIndexCache >();
- add< UniqueIndex >();
- add< UniqueIndexPreexistingData >();
- add< SubobjectInArray >();
- add< Size >();
- add< FullArray >();
- add< InsideArray >();
- add< IndexInsideArrayCorrect >();
- add< SubobjArr >();
- add< MinMax >();
- add< MatchCodeCodeWScope >();
- add< MatchDBRefType >();
- add< DirectLocking >();
- add< FastCountIn >();
- add< EmbeddedArray >();
- add< DifferentNumbers >();
- add< SymbolStringSame >();
- add< TailableCappedRaceCondition >();
- add< HelperTest >();
- add< HelperByIdTest >();
- add< FindingStartPartiallyFull >();
- add< FindingStartStale >();
- add< WhatsMyUri >();
- add< Exhaust >();
- add< QueryCursorTimeout >();
- add< QueryReadsAll >();
- add< KillPinnedCursor >();
-
- add< queryobjecttests::names1 >();
-
- add< OrderingTest >();
- }
- };
+ {
+ Ordering o = Ordering::make(BSON("a.d" << 1 << "a" << 1 << "e" << -1));
+ ASSERT_EQUALS(1, o.get(0));
+ ASSERT_EQUALS(1, o.get(1));
+ ASSERT_EQUALS(-1, o.get(2));
- SuiteInstance<All> myall;
+ ASSERT(!o.descending(1));
+ ASSERT(!o.descending(1 << 1));
+ ASSERT(o.descending(1 << 2));
+ }
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("query") {}
+
+ void setupTests() {
+ add<FindingStart>();
+ add<FindOneOr>();
+ add<FindOneRequireIndex>();
+ add<FindOneEmptyObj>();
+ add<BoundedKey>();
+ add<GetMore>();
+ add<GetMoreKillOp>();
+ add<GetMoreInvalidRequest>();
+ add<PositiveLimit>();
+ add<ReturnOneOfManyAndTail>();
+ add<TailNotAtEnd>();
+ add<EmptyTail>();
+ add<TailableDelete>();
+ add<TailableDelete2>();
+ add<TailableInsertDelete>();
+ add<TailCappedOnly>();
+ add<TailableQueryOnId>();
+ add<OplogReplayMode>();
+ add<OplogReplaySlaveReadTill>();
+ add<OplogReplayExplain>();
+ add<ArrayId>();
+ add<UnderscoreNs>();
+ add<EmptyFieldSpec>();
+ add<MultiNe>();
+ add<EmbeddedNe>();
+ add<EmbeddedNumericTypes>();
+ add<AutoResetIndexCache>();
+ add<UniqueIndex>();
+ add<UniqueIndexPreexistingData>();
+ add<SubobjectInArray>();
+ add<Size>();
+ add<FullArray>();
+ add<InsideArray>();
+ add<IndexInsideArrayCorrect>();
+ add<SubobjArr>();
+ add<MinMax>();
+ add<MatchCodeCodeWScope>();
+ add<MatchDBRefType>();
+ add<DirectLocking>();
+ add<FastCountIn>();
+ add<EmbeddedArray>();
+ add<DifferentNumbers>();
+ add<SymbolStringSame>();
+ add<TailableCappedRaceCondition>();
+ add<HelperTest>();
+ add<HelperByIdTest>();
+ add<FindingStartPartiallyFull>();
+ add<FindingStartStale>();
+ add<WhatsMyUri>();
+ add<Exhaust>();
+ add<QueryCursorTimeout>();
+ add<QueryReadsAll>();
+ add<KillPinnedCursor>();
+
+ add<queryobjecttests::names1>();
+
+ add<OrderingTest>();
+ }
+};
-} // namespace QueryTests
+SuiteInstance<All> myall;
+} // namespace QueryTests
diff --git a/src/mongo/dbtests/replica_set_monitor_test.cpp b/src/mongo/dbtests/replica_set_monitor_test.cpp
index 109b21adb47..cba64c26b80 100644
--- a/src/mongo/dbtests/replica_set_monitor_test.cpp
+++ b/src/mongo/dbtests/replica_set_monitor_test.cpp
@@ -75,1559 +75,1577 @@ typedef SetState::Nodes Nodes;
namespace mongo_test {
- bool isCompatible(const Node& node, ReadPreference pref, const TagSet& tagSet) {
- set<HostAndPort> seeds;
- seeds.insert(node.host);
- SetState set("name", seeds);
- set.nodes.push_back(node);
-
- ReadPreferenceSetting criteria(pref, tagSet);
- return !set.getMatchingHost(criteria).empty();
- }
-
- HostAndPort selectNode(const vector<Node>& nodes,
- ReadPreference pref,
- const TagSet& tagSet,
- int latencyThresholdMillis,
- bool* isPrimarySelected)
- {
- invariant(!nodes.empty());
- set<HostAndPort> seeds;
- seeds.insert(nodes.front().host);
- SetState set("name", seeds);
- set.nodes = nodes;
- set.latencyThresholdMicros = latencyThresholdMillis * 1000;
-
- ReadPreferenceSetting criteria(pref, tagSet);
- HostAndPort out = set.getMatchingHost(criteria);
- if (isPrimarySelected)
- *isPrimarySelected = !out.empty() && set.findNode(out)->isMaster;
- return out;
- }
+bool isCompatible(const Node& node, ReadPreference pref, const TagSet& tagSet) {
+ set<HostAndPort> seeds;
+ seeds.insert(node.host);
+ SetState set("name", seeds);
+ set.nodes.push_back(node);
+
+ ReadPreferenceSetting criteria(pref, tagSet);
+ return !set.getMatchingHost(criteria).empty();
+}
+
+HostAndPort selectNode(const vector<Node>& nodes,
+ ReadPreference pref,
+ const TagSet& tagSet,
+ int latencyThresholdMillis,
+ bool* isPrimarySelected) {
+ invariant(!nodes.empty());
+ set<HostAndPort> seeds;
+ seeds.insert(nodes.front().host);
+ SetState set("name", seeds);
+ set.nodes = nodes;
+ set.latencyThresholdMicros = latencyThresholdMillis * 1000;
+
+ ReadPreferenceSetting criteria(pref, tagSet);
+ HostAndPort out = set.getMatchingHost(criteria);
+ if (isPrimarySelected)
+ *isPrimarySelected = !out.empty() && set.findNode(out)->isMaster;
+ return out;
+}
- const BSONObj SampleIsMasterDoc = BSON("tags"
- << BSON("dc" << "NYC"
- << "p" << "2"
- << "region" << "NA"));
- const BSONObj SampleTags = SampleIsMasterDoc["tags"].Obj();
- const BSONObj NoTags = BSONObj();
- const BSONObj NoTagIsMasterDoc = BSON("isMaster" << true);
- TEST(ReplSetMonitorNode, SimpleGoodMatch) {
- Node node(((HostAndPort())));
- node.tags = BSON("dc" << "sf");
- ASSERT(node.matches(BSON("dc" << "sf")));
- }
+const BSONObj SampleIsMasterDoc = BSON("tags" << BSON("dc"
+ << "NYC"
+ << "p"
+ << "2"
+ << "region"
+ << "NA"));
+const BSONObj SampleTags = SampleIsMasterDoc["tags"].Obj();
+const BSONObj NoTags = BSONObj();
+const BSONObj NoTagIsMasterDoc = BSON("isMaster" << true);
+
+TEST(ReplSetMonitorNode, SimpleGoodMatch) {
+ Node node(((HostAndPort())));
+ node.tags = BSON("dc"
+ << "sf");
+ ASSERT(node.matches(BSON("dc"
+ << "sf")));
+}
- TEST(ReplSetMonitorNode, SimpleBadMatch) {
- Node node((HostAndPort()));
- node.tags = BSON("dc" << "nyc");
- ASSERT(!node.matches(BSON("dc" << "sf")));
- }
+TEST(ReplSetMonitorNode, SimpleBadMatch) {
+ Node node((HostAndPort()));
+ node.tags = BSON("dc"
+ << "nyc");
+ ASSERT(!node.matches(BSON("dc"
+ << "sf")));
+}
- TEST(ReplSetMonitorNode, ExactMatch) {
- Node node((HostAndPort()));
- node.tags = SampleTags;
- ASSERT(node.matches(SampleIsMasterDoc["tags"].Obj()));
- }
+TEST(ReplSetMonitorNode, ExactMatch) {
+ Node node((HostAndPort()));
+ node.tags = SampleTags;
+ ASSERT(node.matches(SampleIsMasterDoc["tags"].Obj()));
+}
- TEST(ReplSetMonitorNode, EmptyTag) {
- Node node((HostAndPort()));
- node.tags = SampleTags;
- ASSERT(node.matches(BSONObj()));
- }
+TEST(ReplSetMonitorNode, EmptyTag) {
+ Node node((HostAndPort()));
+ node.tags = SampleTags;
+ ASSERT(node.matches(BSONObj()));
+}
- TEST(ReplSetMonitorNode, MemberNoTagMatchesEmptyTag) {
- Node node((HostAndPort()));
- node.tags = NoTags;
- ASSERT(node.matches(BSONObj()));
- }
+TEST(ReplSetMonitorNode, MemberNoTagMatchesEmptyTag) {
+ Node node((HostAndPort()));
+ node.tags = NoTags;
+ ASSERT(node.matches(BSONObj()));
+}
- TEST(ReplSetMonitorNode, MemberNoTagDoesNotMatch) {
- Node node((HostAndPort()));
- node.tags = NoTags;
- ASSERT(!node.matches(BSON("dc" << "NYC")));
- }
+TEST(ReplSetMonitorNode, MemberNoTagDoesNotMatch) {
+ Node node((HostAndPort()));
+ node.tags = NoTags;
+ ASSERT(!node.matches(BSON("dc"
+ << "NYC")));
+}
- TEST(ReplSetMonitorNode, IncompleteMatch) {
- Node node((HostAndPort()));
- node.tags = SampleTags;
- ASSERT(!node.matches(BSON("dc" << "NYC"
- << "p" << "2"
- << "hello" << "world")));
- }
+TEST(ReplSetMonitorNode, IncompleteMatch) {
+ Node node((HostAndPort()));
+ node.tags = SampleTags;
+ ASSERT(!node.matches(BSON("dc"
+ << "NYC"
+ << "p"
+ << "2"
+ << "hello"
+ << "world")));
+}
- TEST(ReplSetMonitorNode, PartialMatch) {
- Node node((HostAndPort()));
- node.tags = SampleTags;
- ASSERT(node.matches(BSON("dc" << "NYC"
- << "p" << "2")));
- }
+TEST(ReplSetMonitorNode, PartialMatch) {
+ Node node((HostAndPort()));
+ node.tags = SampleTags;
+ ASSERT(node.matches(BSON("dc"
+ << "NYC"
+ << "p"
+ << "2")));
+}
- TEST(ReplSetMonitorNode, SingleTagCrit) {
- Node node((HostAndPort()));
- node.tags = SampleTags;
- ASSERT(node.matches(BSON("p" << "2")));
- }
+TEST(ReplSetMonitorNode, SingleTagCrit) {
+ Node node((HostAndPort()));
+ node.tags = SampleTags;
+ ASSERT(node.matches(BSON("p"
+ << "2")));
+}
- TEST(ReplSetMonitorNode, BadSingleTagCrit) {
- Node node((HostAndPort()));
- node.tags = SampleTags;
- ASSERT(!node.matches(BSON("dc" << "SF")));
- }
+TEST(ReplSetMonitorNode, BadSingleTagCrit) {
+ Node node((HostAndPort()));
+ node.tags = SampleTags;
+ ASSERT(!node.matches(BSON("dc"
+ << "SF")));
+}
- TEST(ReplSetMonitorNode, NonExistingFieldTag) {
- Node node((HostAndPort()));
- node.tags = SampleTags;
- ASSERT(!node.matches(BSON("noSQL" << "Mongo")));
- }
+TEST(ReplSetMonitorNode, NonExistingFieldTag) {
+ Node node((HostAndPort()));
+ node.tags = SampleTags;
+ ASSERT(!node.matches(BSON("noSQL"
+ << "Mongo")));
+}
-    TEST(ReplSetMonitorNode, UnorderedMatching) {
- Node node((HostAndPort()));
- node.tags = SampleTags;
- ASSERT(node.matches(BSON("p" << "2" << "dc" << "NYC")));
- }
+TEST(ReplSetMonitorNode, UnorderedMatching) {
+ Node node((HostAndPort()));
+ node.tags = SampleTags;
+ ASSERT(node.matches(BSON("p"
+ << "2"
+ << "dc"
+ << "NYC")));
+}
- TEST(ReplSetMonitorNode, SameValueDiffKey) {
- Node node((HostAndPort()));
- node.tags = SampleTags;
- ASSERT(!node.matches(BSON("datacenter" << "NYC")));
- }
+TEST(ReplSetMonitorNode, SameValueDiffKey) {
+ Node node((HostAndPort()));
+ node.tags = SampleTags;
+ ASSERT(!node.matches(BSON("datacenter"
+ << "NYC")));
+}
- TEST(ReplSetMonitorNode, PriNodeCompatibleTag) {
- Node node(HostAndPort("dummy", 3));
- node.tags = SampleTags;
+TEST(ReplSetMonitorNode, PriNodeCompatibleTag) {
+ Node node(HostAndPort("dummy", 3));
+ node.tags = SampleTags;
- node.isUp = true;
- node.isMaster = true;
+ node.isUp = true;
+ node.isMaster = true;
- BSONArrayBuilder builder;
- builder.append(BSON("dc" << "NYC"));
+ BSONArrayBuilder builder;
+ builder.append(BSON("dc"
+ << "NYC"));
- TagSet tags(BSONArray(builder.done()));
+ TagSet tags(BSONArray(builder.done()));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_Nearest, tags));
- }
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_Nearest, tags));
+}
- TEST(ReplSetMonitorNode, SecNodeCompatibleTag) {
- Node node(HostAndPort("dummy", 3));
- node.tags = SampleTags;
+TEST(ReplSetMonitorNode, SecNodeCompatibleTag) {
+ Node node(HostAndPort("dummy", 3));
+ node.tags = SampleTags;
- node.isUp = true;
- node.isMaster = false;
+ node.isUp = true;
+ node.isMaster = false;
- BSONArrayBuilder builder;
- builder.append(BSON("dc" << "NYC"));
+ BSONArrayBuilder builder;
+ builder.append(BSON("dc"
+ << "NYC"));
- TagSet tags(BSONArray(builder.done()));
+ TagSet tags(BSONArray(builder.done()));
- ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_Nearest, tags));
- }
+ ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_Nearest, tags));
+}
- TEST(ReplSetMonitorNode, PriNodeNotCompatibleTag) {
- Node node(HostAndPort("dummy", 3));
- node.tags = SampleTags;
+TEST(ReplSetMonitorNode, PriNodeNotCompatibleTag) {
+ Node node(HostAndPort("dummy", 3));
+ node.tags = SampleTags;
- node.isUp = true;
- node.isMaster = true;
+ node.isUp = true;
+ node.isMaster = true;
- BSONArrayBuilder builder;
- builder.append(BSON("dc" << "SF"));
+ BSONArrayBuilder builder;
+ builder.append(BSON("dc"
+ << "SF"));
- TagSet tags(BSONArray(builder.done()));
+ TagSet tags(BSONArray(builder.done()));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_Nearest, tags));
- }
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_Nearest, tags));
+}
- TEST(ReplSetMonitorNode, SecNodeNotCompatibleTag) {
- Node node(HostAndPort("dummy", 3));
- node.tags = SampleTags;
+TEST(ReplSetMonitorNode, SecNodeNotCompatibleTag) {
+ Node node(HostAndPort("dummy", 3));
+ node.tags = SampleTags;
- node.isUp = true;
- node.isMaster = false;
+ node.isUp = true;
+ node.isMaster = false;
- BSONArrayBuilder builder;
- builder.append(BSON("dc" << "SF"));
+ BSONArrayBuilder builder;
+ builder.append(BSON("dc"
+ << "SF"));
- TagSet tags(BSONArray(builder.done()));
+ TagSet tags(BSONArray(builder.done()));
- ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_Nearest, tags));
- }
+ ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_Nearest, tags));
+}
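Taken together, the four single-tag compatibility tests above fix the rule per read preference. A sketch of that rule, assuming tagsMatch means "the node matches at least one criterion in the TagSet" (the real isCompatible lives in the replica set monitor internals):

bool isCompatibleSketch(const Node& node, mongo::ReadPreference pref, bool tagsMatch) {
    if (!node.isUp)
        return false;
    switch (pref) {
        case mongo::ReadPreference_PrimaryOnly:
            return node.isMaster;  // tags are ignored entirely
        case mongo::ReadPreference_PrimaryPreferred:
        case mongo::ReadPreference_SecondaryPreferred:
            return node.isMaster || tagsMatch;  // the primary is always an acceptable fallback
        case mongo::ReadPreference_SecondaryOnly:
            return !node.isMaster && tagsMatch;
        case mongo::ReadPreference_Nearest:
            return tagsMatch;  // role does not matter; tags always do
    }
    return false;
}

Note that PrimaryPreferred and SecondaryPreferred are indistinguishable at this level; the ordering between primary and secondaries only shows up in selectNode, exercised further down.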
- TEST(ReplSetMonitorNode, PriNodeCompatiblMultiTag) {
- Node node(HostAndPort("dummy", 3));
- node.tags = SampleTags;
+TEST(ReplSetMonitorNode, PriNodeCompatibleMultiTag) {
+ Node node(HostAndPort("dummy", 3));
+ node.tags = SampleTags;
- node.isUp = true;
- node.isMaster = true;
+ node.isUp = true;
+ node.isMaster = true;
- BSONArrayBuilder builder;
- builder.append(BSON("dc" << "RP"));
- builder.append(BSON("dc" << "NYC" << "p" << "2"));
+ BSONArrayBuilder builder;
+ builder.append(BSON("dc"
+ << "RP"));
+ builder.append(BSON("dc"
+ << "NYC"
+ << "p"
+ << "2"));
- TagSet tags(BSONArray(builder.done()));
+ TagSet tags(BSONArray(builder.done()));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_Nearest, tags));
- }
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_Nearest, tags));
+}
- TEST(ReplSetMonitorNode, SecNodeCompatibleMultiTag) {
- Node node(HostAndPort("dummy", 3));
- node.tags = SampleTags;
+TEST(ReplSetMonitorNode, SecNodeCompatibleMultiTag) {
+ Node node(HostAndPort("dummy", 3));
+ node.tags = SampleTags;
- node.isUp = true;
- node.isMaster = false;
+ node.isUp = true;
+ node.isMaster = false;
- BSONArrayBuilder builder;
- builder.append(BSON("dc" << "RP"));
- builder.append(BSON("dc" << "NYC" << "p" << "2"));
+ BSONArrayBuilder builder;
+ builder.append(BSON("dc"
+ << "RP"));
+ builder.append(BSON("dc"
+ << "NYC"
+ << "p"
+ << "2"));
- TagSet tags(BSONArray(builder.done()));
+ TagSet tags(BSONArray(builder.done()));
- ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_Nearest, tags));
- }
+ ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_Nearest, tags));
+}
- TEST(ReplSetMonitorNode, PriNodeNotCompatibleMultiTag) {
- Node node(HostAndPort("dummy", 3));
- node.tags = SampleTags;
+TEST(ReplSetMonitorNode, PriNodeNotCompatibleMultiTag) {
+ Node node(HostAndPort("dummy", 3));
+ node.tags = SampleTags;
- node.isUp = true;
- node.isMaster = true;
+ node.isUp = true;
+ node.isMaster = true;
- BSONArrayBuilder builder;
- builder.append(BSON("dc" << "sf"));
- builder.append(BSON("dc" << "NYC" << "P" << "4"));
+ BSONArrayBuilder builder;
+ builder.append(BSON("dc"
+ << "sf"));
+ builder.append(BSON("dc"
+ << "NYC"
+ << "P"
+ << "4"));
- TagSet tags(BSONArray(builder.done()));
+ TagSet tags(BSONArray(builder.done()));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
- ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_Nearest, tags));
- }
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
+ ASSERT(isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_Nearest, tags));
+}
- TEST(ReplSetMonitorNode, SecNodeNotCompatibleMultiTag) {
- Node node(HostAndPort("dummy", 3));
- node.tags = SampleTags;
+TEST(ReplSetMonitorNode, SecNodeNotCompatibleMultiTag) {
+ Node node(HostAndPort("dummy", 3));
+ node.tags = SampleTags;
- node.isUp = true;
- node.isMaster = false;
+ node.isUp = true;
+ node.isMaster = false;
- BSONArrayBuilder builder;
- builder.append(BSON("dc" << "sf"));
- builder.append(BSON("dc" << "NYC" << "P" << "4"));
+ BSONArrayBuilder builder;
+ builder.append(BSON("dc"
+ << "sf"));
+ builder.append(BSON("dc"
+ << "NYC"
+ << "P"
+ << "4"));
- TagSet tags(BSONArray(builder.done()));
+ TagSet tags(BSONArray(builder.done()));
- ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
- ASSERT(!isCompatible(node, mongo::ReadPreference_Nearest, tags));
- }
+ ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryOnly, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_PrimaryPreferred, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryPreferred, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_SecondaryOnly, tags));
+ ASSERT(!isCompatible(node, mongo::ReadPreference_Nearest, tags));
+}
- class NodeSetFixtures {
- public:
- static vector<Node> getThreeMemberWithTags();
- };
+class NodeSetFixtures {
+public:
+ static vector<Node> getThreeMemberWithTags();
+};
- vector<Node> NodeSetFixtures::getThreeMemberWithTags() {
- vector<Node> nodes;
+vector<Node> NodeSetFixtures::getThreeMemberWithTags() {
+ vector<Node> nodes;
- nodes.push_back(Node(HostAndPort("a")));
- nodes.push_back(Node(HostAndPort("b")));
- nodes.push_back(Node(HostAndPort("c")));
+ nodes.push_back(Node(HostAndPort("a")));
+ nodes.push_back(Node(HostAndPort("b")));
+ nodes.push_back(Node(HostAndPort("c")));
- nodes[0].isUp = true;
- nodes[1].isUp = true;
- nodes[2].isUp = true;
+ nodes[0].isUp = true;
+ nodes[1].isUp = true;
+ nodes[2].isUp = true;
- nodes[0].isMaster = false;
- nodes[1].isMaster = true;
- nodes[2].isMaster = false;
+ nodes[0].isMaster = false;
+ nodes[1].isMaster = true;
+ nodes[2].isMaster = false;
- nodes[0].tags = BSON("dc" << "nyc" << "p" << "1");
- nodes[1].tags = BSON("dc" << "sf");
- nodes[2].tags = BSON("dc" << "nyc" << "p" << "2");
+ nodes[0].tags = BSON("dc"
+ << "nyc"
+ << "p"
+ << "1");
+ nodes[1].tags = BSON("dc"
+ << "sf");
+ nodes[2].tags = BSON("dc"
+ << "nyc"
+ << "p"
+ << "2");
- return nodes;
- }
+ return nodes;
+}
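For readers of the selectNode tests that follow, the fixture built here is:

//   a: secondary, tags {dc: "nyc", p: "1"}
//   b: primary,   tags {dc: "sf"}
//   c: secondary, tags {dc: "nyc", p: "2"}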
- class TagSetFixtures {
- public:
- static BSONArray getDefaultSet();
- static BSONArray getP2Tag();
- static BSONArray getSingleNoMatchTag();
- static BSONArray getMultiNoMatchTag();
- };
+class TagSetFixtures {
+public:
+ static BSONArray getDefaultSet();
+ static BSONArray getP2Tag();
+ static BSONArray getSingleNoMatchTag();
+ static BSONArray getMultiNoMatchTag();
+};
+
+BSONArray TagSetFixtures::getDefaultSet() {
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSONObj());
+ return arrayBuilder.arr();
+}
- BSONArray TagSetFixtures::getDefaultSet() {
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSONObj());
- return arrayBuilder.arr();
- }
+BSONArray TagSetFixtures::getP2Tag() {
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("p"
+ << "2"));
+ return arrayBuilder.arr();
+}
- BSONArray TagSetFixtures::getP2Tag() {
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("p" << "2"));
- return arrayBuilder.arr();
- }
+BSONArray TagSetFixtures::getSingleNoMatchTag() {
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("k"
+ << "x"));
+ return arrayBuilder.arr();
+}
- BSONArray TagSetFixtures::getSingleNoMatchTag() {
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("k" << "x"));
- return arrayBuilder.arr();
- }
+BSONArray TagSetFixtures::getMultiNoMatchTag() {
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("mongo"
+ << "db"));
+ arrayBuilder.append(BSON("by"
+ << "10gen"));
+ return arrayBuilder.arr();
+}
- BSONArray TagSetFixtures::getMultiNoMatchTag() {
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("mongo" << "db"));
- arrayBuilder.append(BSON("by" << "10gen"));
- return arrayBuilder.arr();
- }
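Spelled out against the a/b/c nodes above, the tag-set fixtures expand to:

//   getDefaultSet()       -> [ {} ]                          matches every node
//   getP2Tag()            -> [ {p: "2"} ]                     matches only c
//   getSingleNoMatchTag() -> [ {k: "x"} ]                     matches nothing
//   getMultiNoMatchTag()  -> [ {mongo: "db"}, {by: "10gen"} ] matches nothing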
+TEST(ReplSetMonitorReadPref, PrimaryOnly) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, PrimaryOnly) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryOnly, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryOnly, tags, 3,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
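The call shape used throughout is selectNode(nodes, pref, tags, threshold, &isPrimarySelected). The fourth argument (3 or 1 in these tests) is presumably the local latency threshold in milliseconds for the nearest-node window; the out-parameter reports whether the chosen host is the primary, and an empty HostAndPort means no node satisfied the preference. A minimal usage sketch under those assumptions:

bool isPrimarySelected = false;
HostAndPort host =
    selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 3, &isPrimarySelected);
if (host.empty()) {
    // nothing matched: all candidates are down or no tag criterion applies
} else if (isPrimarySelected) {
    // fell back to (or explicitly chose) the primary
}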
+TEST(ReplSetMonitorReadPref, PrimaryOnlyPriNotOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, PrimaryOnlyPriNotOk) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[1].markFailed();
- nodes[1].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryOnly, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryOnly, tags, 3,
- &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
- }
+TEST(ReplSetMonitorReadPref, PrimaryMissing) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, PrimaryMissing) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[1].isMaster = false;
- nodes[1].isMaster = false;
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryOnly, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryOnly, tags, 3,
- &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
- }
+TEST(ReplSetMonitorReadPref, PriPrefWithPriOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- TEST(ReplSetMonitorReadPref, PriPrefWithPriOk) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TagSet tags(TagSetFixtures::getDefaultSet());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryPreferred, tags, 1, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, tags, 1,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+TEST(ReplSetMonitorReadPref, PriPrefWithPriNotOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, PriPrefWithPriNotOk) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[1].markFailed();
- nodes[1].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryPreferred, tags, 1, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, tags, 1,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT(host.host() == "a" || host.host() == "c");
+}
- ASSERT(!isPrimarySelected);
- ASSERT(host.host() == "a" || host.host() == "c");
- }
+TEST(ReplSetMonitorReadPref, SecOnly) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, SecOnly) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[2].markFailed();
- nodes[2].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryOnly, tags, 1, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, tags, 1,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST(ReplSetMonitorReadPref, SecOnlyOnlyPriOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, SecOnlyOnlyPriOk) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[0].markFailed();
+ nodes[2].markFailed();
- nodes[0].markFailed();
- nodes[2].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryOnly, tags, 1, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, tags, 1,
- &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
- }
+TEST(ReplSetMonitorReadPref, SecPref) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, SecPref) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[2].markFailed();
- nodes[2].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 1, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 1,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST(ReplSetMonitorReadPref, SecPrefWithNoSecOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, SecPrefWithNoSecOk) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[0].markFailed();
+ nodes[2].markFailed();
- nodes[0].markFailed();
- nodes[2].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 1, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 1,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+TEST(ReplSetMonitorReadPref, SecPrefWithNoNodeOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, SecPrefWithNoNodeOk) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[0].markFailed();
+ nodes[1].markFailed();
+ nodes[2].markFailed();
- nodes[0].markFailed();
- nodes[1].markFailed();
- nodes[2].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 1, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 1,
- &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
- }
+TEST(ReplSetMonitorReadPref, NearestAllLocal) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, NearestAllLocal) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[0].latencyMicros = 1 * 1000;
+ nodes[1].latencyMicros = 2 * 1000;
+ nodes[2].latencyMicros = 3 * 1000;
- nodes[0].latencyMicros = 1*1000;
- nodes[1].latencyMicros = 2*1000;
- nodes[2].latencyMicros = 3*1000;
+    bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_Nearest, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = 0;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, tags, 3,
- &isPrimarySelected);
+ // Any host is ok
+ ASSERT(!host.empty());
+ ASSERT_EQUALS(isPrimarySelected, host.host() == "b");
+}
- // Any host is ok
- ASSERT(!host.empty());
- ASSERT_EQUALS(isPrimarySelected, host.host() == "b");
- }
+TEST(ReplSetMonitorReadPref, NearestOneLocal) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getDefaultSet());
- TEST(ReplSetMonitorReadPref, NearestOneLocal) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getDefaultSet());
+ nodes[0].latencyMicros = 10 * 1000;
+ nodes[1].latencyMicros = 20 * 1000;
+ nodes[2].latencyMicros = 30 * 1000;
- nodes[0].latencyMicros = 10*1000;
- nodes[1].latencyMicros = 20*1000;
- nodes[2].latencyMicros = 30*1000;
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_Nearest, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, tags, 3,
- &isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+ ASSERT(!isPrimarySelected);
+}
- ASSERT_EQUALS("a", host.host());
- ASSERT(!isPrimarySelected);
- }
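The two Nearest tests above differ only in scale, which is the point, assuming the threshold argument is a latency window in milliseconds anchored at the fastest node:

//   NearestAllLocal: latencies {1, 2, 3} ms, threshold 3 ms
//                    window = [1 ms, 1 + 3 ms] -> all three qualify, any host may win
//   NearestOneLocal: latencies {10, 20, 30} ms, threshold 3 ms
//                    window = [10 ms, 13 ms]   -> only "a" qualifies, result is deterministic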
+TEST(ReplSetMonitorReadPref, PriOnlyWithTagsNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getP2Tag());
- TEST(ReplSetMonitorReadPref, PriOnlyWithTagsNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getP2Tag());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryOnly, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryOnly, tags, 3,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+    // Note: PrimaryOnly ignores the tag set
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(isPrimarySelected);
- // Note: PrimaryOnly ignores tag
- ASSERT_EQUALS("b", host.host());
- }
+TEST(ReplSetMonitorReadPref, PriPrefPriNotOkWithTags) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getP2Tag());
- TEST(ReplSetMonitorReadPref, PriPrefPriNotOkWithTags) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getP2Tag());
+ nodes[1].markFailed();
- nodes[1].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+TEST(ReplSetMonitorReadPref, PriPrefPriOkWithTagsNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getSingleNoMatchTag());
- TEST(ReplSetMonitorReadPref, PriPrefPriOkWithTagsNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getSingleNoMatchTag());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+TEST(ReplSetMonitorReadPref, PriPrefPriNotOkWithTagsNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getSingleNoMatchTag());
- TEST(ReplSetMonitorReadPref, PriPrefPriNotOkWithTagsNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getSingleNoMatchTag());
+ nodes[1].markFailed();
- nodes[1].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
- }
+TEST(ReplSetMonitorReadPref, SecOnlyWithTags) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getP2Tag());
- TEST(ReplSetMonitorReadPref, SecOnlyWithTags) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getP2Tag());
+    bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryOnly, tags, 3, &isPrimarySelected);
- bool isPrimarySelected;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, tags, 3,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+TEST(ReplSetMonitorReadPref, SecOnlyWithTagsMatchOnlyPri) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- TEST(ReplSetMonitorReadPref, SecOnlyWithTagsMatchOnlyPri) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("dc"
+ << "sf"));
+ TagSet tags(arrayBuilder.arr());
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("dc" << "sf"));
- TagSet tags(arrayBuilder.arr());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryOnly, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, tags, 3,
- &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
- }
+TEST(ReplSetMonitorReadPref, SecPrefWithTags) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getP2Tag());
- TEST(ReplSetMonitorReadPref, SecPrefWithTags) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getP2Tag());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+TEST(ReplSetMonitorReadPref, SecPrefSecNotOkWithTags) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- TEST(ReplSetMonitorReadPref, SecPrefSecNotOkWithTags) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("dc"
+ << "nyc"));
+ TagSet tags(arrayBuilder.arr());
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("dc" << "nyc"));
- TagSet tags(arrayBuilder.arr());
+ nodes[2].markFailed();
- nodes[2].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST(ReplSetMonitorReadPref, SecPrefPriOkWithTagsNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getSingleNoMatchTag());
- TEST(ReplSetMonitorReadPref, SecPrefPriOkWithTagsNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getSingleNoMatchTag());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+TEST(ReplSetMonitorReadPref, SecPrefPriNotOkWithTagsNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getSingleNoMatchTag());
- TEST(ReplSetMonitorReadPref, SecPrefPriNotOkWithTagsNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getSingleNoMatchTag());
+ nodes[1].markFailed();
- nodes[1].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
- }
+TEST(ReplSetMonitorReadPref, SecPrefPriOkWithSecNotMatchTag) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getSingleNoMatchTag());
- TEST(ReplSetMonitorReadPref, SecPrefPriOkWithSecNotMatchTag) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getSingleNoMatchTag());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+TEST(ReplSetMonitorReadPref, NearestWithTags) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- TEST(ReplSetMonitorReadPref, NearestWithTags) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("p"
+ << "1"));
+ TagSet tags(arrayBuilder.arr());
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("p" << "1"));
- TagSet tags(arrayBuilder.arr());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_Nearest, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, tags, 3,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST(ReplSetMonitorReadPref, NearestWithTagsNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getSingleNoMatchTag());
- TEST(ReplSetMonitorReadPref, NearestWithTagsNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getSingleNoMatchTag());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_Nearest, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, tags, 3,
- &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
- }
+TEST(ReplSetMonitorReadPref, MultiPriOnlyTag) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getMultiNoMatchTag());
- TEST(ReplSetMonitorReadPref, MultiPriOnlyTag) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getMultiNoMatchTag());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryOnly, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryOnly, tags, 3,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+TEST(ReplSetMonitorReadPref, MultiPriOnlyPriNotOkTag) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getMultiNoMatchTag());
- TEST(ReplSetMonitorReadPref, MultiPriOnlyPriNotOkTag) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getMultiNoMatchTag());
+ nodes[1].markFailed();
- nodes[1].markFailed();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryOnly, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryOnly, tags, 3,
- &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
- }
+TEST(ReplSetMonitorReadPref, PriPrefPriOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- TEST(ReplSetMonitorReadPref, PriPrefPriOk) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("p"
+ << "1"));
+ arrayBuilder.append(BSON("p"
+ << "2"));
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("p" << "1"));
- arrayBuilder.append(BSON("p" << "2"));
+ TagSet tags(arrayBuilder.arr());
- TagSet tags(arrayBuilder.arr());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
+class MultiTags : public mongo::unittest::Test {
+public:
+ vector<Node> getNodes() const {
+ return NodeSetFixtures::getThreeMemberWithTags();
}
- class MultiTags: public mongo::unittest::Test {
- public:
- vector<Node> getNodes() const {
- return NodeSetFixtures::getThreeMemberWithTags();
- }
-
- const TagSet& getMatchesFirstTagSet() {
- if (matchFirstTags.get() != NULL) {
- return *matchFirstTags;
- }
-
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("p" << "1"));
- arrayBuilder.append(BSON("p" << "2"));
- matchFirstTags.reset(new TagSet(arrayBuilder.arr()));
-
+ const TagSet& getMatchesFirstTagSet() {
+ if (matchFirstTags.get() != NULL) {
return *matchFirstTags;
}
- const TagSet& getMatchesSecondTagSet() {
- if (matchSecondTags.get() != NULL) {
- return *matchSecondTags;
- }
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("p"
+ << "1"));
+ arrayBuilder.append(BSON("p"
+ << "2"));
+ matchFirstTags.reset(new TagSet(arrayBuilder.arr()));
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("p" << "3"));
- arrayBuilder.append(BSON("p" << "2"));
- arrayBuilder.append(BSON("p" << "1"));
- matchSecondTags.reset(new TagSet(arrayBuilder.arr()));
+ return *matchFirstTags;
+ }
+ const TagSet& getMatchesSecondTagSet() {
+ if (matchSecondTags.get() != NULL) {
return *matchSecondTags;
}
- const TagSet& getMatchesLastTagSet() {
- if (matchLastTags.get() != NULL) {
- return *matchLastTags;
- }
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("p"
+ << "3"));
+ arrayBuilder.append(BSON("p"
+ << "2"));
+ arrayBuilder.append(BSON("p"
+ << "1"));
+ matchSecondTags.reset(new TagSet(arrayBuilder.arr()));
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("p" << "12"));
- arrayBuilder.append(BSON("p" << "23"));
- arrayBuilder.append(BSON("p" << "19"));
- arrayBuilder.append(BSON("p" << "34"));
- arrayBuilder.append(BSON("p" << "1"));
- matchLastTags.reset(new TagSet(arrayBuilder.arr()));
+ return *matchSecondTags;
+ }
+ const TagSet& getMatchesLastTagSet() {
+ if (matchLastTags.get() != NULL) {
return *matchLastTags;
}
- const TagSet& getMatchesPriTagSet() {
- if (matchPriTags.get() != NULL) {
- return *matchPriTags;
- }
-
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("dc" << "sf"));
- arrayBuilder.append(BSON("p" << "1"));
- matchPriTags.reset(new TagSet(arrayBuilder.arr()));
-
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("p"
+ << "12"));
+ arrayBuilder.append(BSON("p"
+ << "23"));
+ arrayBuilder.append(BSON("p"
+ << "19"));
+ arrayBuilder.append(BSON("p"
+ << "34"));
+ arrayBuilder.append(BSON("p"
+ << "1"));
+ matchLastTags.reset(new TagSet(arrayBuilder.arr()));
+
+ return *matchLastTags;
+ }
+
+ const TagSet& getMatchesPriTagSet() {
+ if (matchPriTags.get() != NULL) {
return *matchPriTags;
}
- private:
- scoped_ptr<TagSet> matchFirstTags;
- scoped_ptr<TagSet> matchSecondTags;
- scoped_ptr<TagSet> matchLastTags;
- scoped_ptr<TagSet> matchPriTags;
- };
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("dc"
+ << "sf"));
+ arrayBuilder.append(BSON("p"
+ << "1"));
+ matchPriTags.reset(new TagSet(arrayBuilder.arr()));
- TEST_F(MultiTags, MultiTagsMatchesFirst) {
- vector<Node> nodes = getNodes();
+ return *matchPriTags;
+ }
- nodes[1].markFailed();
+private:
+ scoped_ptr<TagSet> matchFirstTags;
+ scoped_ptr<TagSet> matchSecondTags;
+ scoped_ptr<TagSet> matchLastTags;
+ scoped_ptr<TagSet> matchPriTags;
+};
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, getMatchesFirstTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, MultiTagsMatchesFirst) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+ nodes[1].markFailed();
- TEST_F(MultiTags, PriPrefPriNotOkMatchesFirstNotOk) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_PrimaryPreferred,
+ getMatchesFirstTagSet(),
+ 3,
+ &isPrimarySelected);
- nodes[0].markFailed();
- nodes[1].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, getMatchesFirstTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, PriPrefPriNotOkMatchesFirstNotOk) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+ nodes[0].markFailed();
+ nodes[1].markFailed();
- TEST_F(MultiTags, PriPrefPriNotOkMatchesSecondTest) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_PrimaryPreferred,
+ getMatchesFirstTagSet(),
+ 3,
+ &isPrimarySelected);
- nodes[1].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, getMatchesSecondTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, PriPrefPriNotOkMatchesSecondTest) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+ nodes[1].markFailed();
- TEST_F(MultiTags, PriPrefPriNotOkMatchesSecondNotOkTest) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_PrimaryPreferred,
+ getMatchesSecondTagSet(),
+ 3,
+ &isPrimarySelected);
- nodes[1].markFailed();
- nodes[2].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, getMatchesSecondTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, PriPrefPriNotOkMatchesSecondNotOkTest) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+ nodes[1].markFailed();
+ nodes[2].markFailed();
- TEST_F(MultiTags, PriPrefPriNotOkMatchesLastTest) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_PrimaryPreferred,
+ getMatchesSecondTagSet(),
+ 3,
+ &isPrimarySelected);
- nodes[1].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, getMatchesLastTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, PriPrefPriNotOkMatchesLastTest) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+ nodes[1].markFailed();
- TEST_F(MultiTags, PriPrefPriNotOkMatchesLastNotOkTest) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_PrimaryPreferred,
+ getMatchesLastTagSet(),
+ 3,
+ &isPrimarySelected);
- nodes[0].markFailed();
- nodes[1].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, getMatchesLastTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, PriPrefPriNotOkMatchesLastNotOkTest) {
+ vector<Node> nodes = getNodes();
- ASSERT(host.empty());
- }
+ nodes[0].markFailed();
+ nodes[1].markFailed();
- TEST(MultiTags, PriPrefPriOkNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_PrimaryPreferred,
+ getMatchesLastTagSet(),
+ 3,
+ &isPrimarySelected);
- TagSet tags(TagSetFixtures::getMultiNoMatchTag());
+ ASSERT(host.empty());
+}
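The MatchesFirst/Second/Last family establishes that a multi-criterion TagSet is an ordered fallback list, not a union: criteria are tried front to back, and selection stops at the first criterion that any eligible node satisfies. A sketch of that loop (pickNodeMatching is a hypothetical helper standing in for the per-criterion selection):

// Hypothetical sketch of the fallback order these tests pin down.
HostAndPort selectWithFallback(const vector<Node>& nodes,
                               mongo::ReadPreference pref,
                               const vector<mongo::BSONObj>& criteria) {
    for (size_t i = 0; i < criteria.size(); ++i) {
        HostAndPort host = pickNodeMatching(nodes, pref, criteria[i]);  // hypothetical helper
        if (!host.empty())
            return host;  // first criterion with any eligible node wins
    }
    return HostAndPort();  // no criterion matched anything
}

For example, getMatchesSecondTagSet() is [{p: "3"}, {p: "2"}, {p: "1"}]: {p: "3"} matches nothing, {p: "2"} matches c, so c wins; with c down the search falls through to {p: "1"} and picks a.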
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, tags, 3,
- &isPrimarySelected);
+TEST(MultiTags, PriPrefPriOkNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+ TagSet tags(TagSetFixtures::getMultiNoMatchTag());
- TEST(MultiTags, PriPrefPriNotOkNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getMultiNoMatchTag());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryPreferred, tags, 3, &isPrimarySelected);
- nodes[1].markFailed();
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_PrimaryPreferred, tags, 3,
- &isPrimarySelected);
+TEST(MultiTags, PriPrefPriNotOkNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getMultiNoMatchTag());
- ASSERT(host.empty());
- }
+ nodes[1].markFailed();
- TEST_F(MultiTags, SecOnlyMatchesFirstTest) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_PrimaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, getMatchesFirstTagSet(),
- 3, &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST_F(MultiTags, SecOnlyMatchesFirstTest) {
+ vector<Node> nodes = getNodes();
- TEST_F(MultiTags, SecOnlyMatchesFirstNotOk) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_SecondaryOnly, getMatchesFirstTagSet(), 3, &isPrimarySelected);
- nodes[0].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, getMatchesFirstTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, SecOnlyMatchesFirstNotOk) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+ nodes[0].markFailed();
- TEST_F(MultiTags, SecOnlyMatchesSecond) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_SecondaryOnly, getMatchesFirstTagSet(), 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, getMatchesSecondTagSet(),
- 3, &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+TEST_F(MultiTags, SecOnlyMatchesSecond) {
+ vector<Node> nodes = getNodes();
- TEST_F(MultiTags, SecOnlyMatchesSecondNotOk) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_SecondaryOnly,
+ getMatchesSecondTagSet(),
+ 3,
+ &isPrimarySelected);
- nodes[2].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, getMatchesSecondTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, SecOnlyMatchesSecondNotOk) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+ nodes[2].markFailed();
- TEST_F(MultiTags, SecOnlyMatchesLast) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_SecondaryOnly,
+ getMatchesSecondTagSet(),
+ 3,
+ &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, getMatchesLastTagSet(),
- 3, &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST_F(MultiTags, SecOnlyMatchesLast) {
+ vector<Node> nodes = getNodes();
- TEST_F(MultiTags, SecOnlyMatchesLastNotOk) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_SecondaryOnly, getMatchesLastTagSet(), 3, &isPrimarySelected);
- nodes[0].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, getMatchesLastTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, SecOnlyMatchesLastNotOk) {
+ vector<Node> nodes = getNodes();
- ASSERT(host.empty());
- }
+ nodes[0].markFailed();
- TEST_F(MultiTags, SecOnlyMultiTagsWithPriMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_SecondaryOnly, getMatchesLastTagSet(), 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, getMatchesPriTagSet(),
- 3, &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST_F(MultiTags, SecOnlyMultiTagsWithPriMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- TEST_F(MultiTags, SecOnlyMultiTagsNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getMultiNoMatchTag());
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_SecondaryOnly, getMatchesPriTagSet(), 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryOnly, tags, 3,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- ASSERT(host.empty());
- }
+TEST_F(MultiTags, SecOnlyMultiTagsNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getMultiNoMatchTag());
- TEST_F(MultiTags, SecPrefMatchesFirst) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryOnly, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, getMatchesFirstTagSet(),
- 3, &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST_F(MultiTags, SecPrefMatchesFirst) {
+ vector<Node> nodes = getNodes();
- TEST_F(MultiTags, SecPrefMatchesFirstNotOk) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_SecondaryPreferred,
+ getMatchesFirstTagSet(),
+ 3,
+ &isPrimarySelected);
- nodes[0].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, getMatchesFirstTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, SecPrefMatchesFirstNotOk) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+ nodes[0].markFailed();
- TEST_F(MultiTags, SecPrefMatchesSecond) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_SecondaryPreferred,
+ getMatchesFirstTagSet(),
+ 3,
+ &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, getMatchesSecondTagSet(),
- 3, &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+TEST_F(MultiTags, SecPrefMatchesSecond) {
+ vector<Node> nodes = getNodes();
- TEST_F(MultiTags, SecPrefMatchesSecondNotOk) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_SecondaryPreferred,
+ getMatchesSecondTagSet(),
+ 3,
+ &isPrimarySelected);
- nodes[2].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, getMatchesSecondTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, SecPrefMatchesSecondNotOk) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+ nodes[2].markFailed();
- TEST_F(MultiTags, SecPrefMatchesLast) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_SecondaryPreferred,
+ getMatchesSecondTagSet(),
+ 3,
+ &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, getMatchesLastTagSet(),
- 3, &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST_F(MultiTags, SecPrefMatchesLast) {
+ vector<Node> nodes = getNodes();
- TEST_F(MultiTags, SecPrefMatchesLastNotOk) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_SecondaryPreferred,
+ getMatchesLastTagSet(),
+ 3,
+ &isPrimarySelected);
- nodes[0].markFailed();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, getMatchesLastTagSet(),
- 3, &isPrimarySelected);
+TEST_F(MultiTags, SecPrefMatchesLastNotOk) {
+ vector<Node> nodes = getNodes();
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+ nodes[0].markFailed();
- TEST_F(MultiTags, SecPrefMultiTagsWithPriMatch) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_SecondaryPreferred,
+ getMatchesLastTagSet(),
+ 3,
+ &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, getMatchesPriTagSet(),
- 3, &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST_F(MultiTags, SecPrefMultiTagsWithPriMatch) {
+ vector<Node> nodes = getNodes();
- TEST(MultiTags, SecPrefMultiTagsNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getMultiNoMatchTag());
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(nodes,
+ mongo::ReadPreference_SecondaryPreferred,
+ getMatchesPriTagSet(),
+ 3,
+ &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 3,
- &isPrimarySelected);
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+TEST(MultiTags, SecPrefMultiTagsNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getMultiNoMatchTag());
- TEST(MultiTags, SecPrefMultiTagsNoMatchPriNotOk) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getMultiNoMatchTag());
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 3, &isPrimarySelected);
- nodes[1].markFailed();
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_SecondaryPreferred, tags, 3,
- &isPrimarySelected);
+TEST(MultiTags, SecPrefMultiTagsNoMatchPriNotOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getMultiNoMatchTag());
- ASSERT(host.empty());
- }
+ nodes[1].markFailed();
- TEST_F(MultiTags, NearestMatchesFirst) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_SecondaryPreferred, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, getMatchesFirstTagSet(),
- 3, &isPrimarySelected);
+ ASSERT(host.empty());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+TEST_F(MultiTags, NearestMatchesFirst) {
+ vector<Node> nodes = getNodes();
- TEST(MultiTags, NearestMatchesFirstNotOk) {
- vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_Nearest, getMatchesFirstTagSet(), 3, &isPrimarySelected);
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("p" << "1"));
- arrayBuilder.append(BSON("dc" << "sf"));
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- TagSet tags(arrayBuilder.arr());
+TEST(MultiTags, NearestMatchesFirstNotOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- nodes[0].markFailed();
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("p"
+ << "1"));
+ arrayBuilder.append(BSON("dc"
+ << "sf"));
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, tags, 3,
- &isPrimarySelected);
+ TagSet tags(arrayBuilder.arr());
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+ nodes[0].markFailed();
- TEST_F(MultiTags, NearestMatchesSecond) {
- vector<Node> nodes = getNodes();
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_Nearest, tags, 3, &isPrimarySelected);
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, getMatchesSecondTagSet(), 3,
- &isPrimarySelected);
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("c", host.host());
- }
+TEST_F(MultiTags, NearestMatchesSecond) {
+ vector<Node> nodes = getNodes();
- TEST_F(MultiTags, NearestMatchesSecondNotOk) {
- vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_Nearest, getMatchesSecondTagSet(), 3, &isPrimarySelected);
- BSONArrayBuilder arrayBuilder;
- arrayBuilder.append(BSON("z" << "2"));
- arrayBuilder.append(BSON("p" << "2"));
- arrayBuilder.append(BSON("dc" << "sf"));
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("c", host.host());
+}
- TagSet tags(arrayBuilder.arr());
+TEST_F(MultiTags, NearestMatchesSecondNotOk) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- nodes[2].markFailed();
+ BSONArrayBuilder arrayBuilder;
+ arrayBuilder.append(BSON("z"
+ << "2"));
+ arrayBuilder.append(BSON("p"
+ << "2"));
+ arrayBuilder.append(BSON("dc"
+ << "sf"));
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, tags, 3,
- &isPrimarySelected);
+ TagSet tags(arrayBuilder.arr());
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+ nodes[2].markFailed();
+
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_Nearest, tags, 3, &isPrimarySelected);
- TEST_F(MultiTags, NearestMatchesLast) {
- vector<Node> nodes = getNodes();
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, getMatchesLastTagSet(), 3,
- &isPrimarySelected);
+TEST_F(MultiTags, NearestMatchesLast) {
+ vector<Node> nodes = getNodes();
- ASSERT(!isPrimarySelected);
- ASSERT_EQUALS("a", host.host());
- }
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_Nearest, getMatchesLastTagSet(), 3, &isPrimarySelected);
- TEST_F(MultiTags, NeatestMatchesLastNotOk) {
- vector<Node> nodes = getNodes();
+ ASSERT(!isPrimarySelected);
+ ASSERT_EQUALS("a", host.host());
+}
- nodes[0].markFailed();
+TEST_F(MultiTags, NearestMatchesLastNotOk) {
+ vector<Node> nodes = getNodes();
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, getMatchesLastTagSet(), 3,
- &isPrimarySelected);
+ nodes[0].markFailed();
- ASSERT(host.empty());
- }
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_Nearest, getMatchesLastTagSet(), 3, &isPrimarySelected);
- TEST_F(MultiTags, NearestMultiTagsWithPriMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
+ ASSERT(host.empty());
+}
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, getMatchesPriTagSet(), 3,
- &isPrimarySelected);
+TEST_F(MultiTags, NearestMultiTagsWithPriMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
- ASSERT(isPrimarySelected);
- ASSERT_EQUALS("b", host.host());
- }
+ bool isPrimarySelected = false;
+ HostAndPort host = selectNode(
+ nodes, mongo::ReadPreference_Nearest, getMatchesPriTagSet(), 3, &isPrimarySelected);
- TEST(MultiTags, NearestMultiTagsNoMatch) {
- vector<Node> nodes =
- NodeSetFixtures::getThreeMemberWithTags();
- TagSet tags(TagSetFixtures::getMultiNoMatchTag());
+ ASSERT(isPrimarySelected);
+ ASSERT_EQUALS("b", host.host());
+}
+
+TEST(MultiTags, NearestMultiTagsNoMatch) {
+ vector<Node> nodes = NodeSetFixtures::getThreeMemberWithTags();
+ TagSet tags(TagSetFixtures::getMultiNoMatchTag());
- bool isPrimarySelected = false;
- HostAndPort host = selectNode(nodes,
- mongo::ReadPreference_Nearest, tags, 3,
- &isPrimarySelected);
+ bool isPrimarySelected = false;
+ HostAndPort host =
+ selectNode(nodes, mongo::ReadPreference_Nearest, tags, 3, &isPrimarySelected);
+
+ ASSERT(host.empty());
+}
- ASSERT(host.empty());
+TEST(TagSet, DefaultConstructorMatchesAll) {
+ TagSet tags;
+ ASSERT_EQUALS(tags.getTagBSON(), BSON_ARRAY(BSONObj()));
+}
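+// Note: as the assertion above shows, a default-constructed TagSet holds a
+// single empty document, which matches any node regardless of its tags.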
+
+
+// TODO: Port these existing tests here: replmonitor_bad_seed.js, repl_monitor_refresh.js
+
+/**
+ * Warning: Tests running this fixture cannot be run in parallel with other tests
+ * that use ConnectionString::setConnectionHook.
+ */
+class ReplicaSetMonitorTest : public mongo::unittest::Test {
+protected:
+ void setUp() {
+ _replSet.reset(new MockReplicaSet("test", 3));
+ _originalConnectionHook = ConnectionString::getConnectionHook();
+ ConnectionString::setConnectionHook(mongo::MockConnRegistry::get()->getConnStrHook());
}
- TEST(TagSet, DefaultConstructorMatchesAll) {
- TagSet tags;
- ASSERT_EQUALS(tags.getTagBSON(), BSON_ARRAY(BSONObj()));
+ void tearDown() {
+ ConnectionString::setConnectionHook(_originalConnectionHook);
+ ReplicaSetMonitor::cleanup();
+ _replSet.reset();
+ mongo::ScopedDbConnection::clearPool();
+ }
+
+ MockReplicaSet* getReplSet() {
+ return _replSet.get();
}
+private:
+ ConnectionString::ConnectionHook* _originalConnectionHook;
+ boost::scoped_ptr<MockReplicaSet> _replSet;
+};
- // TODO: Port these existing tests here: replmonitor_bad_seed.js, repl_monitor_refresh.js
+TEST_F(ReplicaSetMonitorTest, SeedWithPriOnlySecDown) {
+ // Test to make sure that the monitor doesn't crash when
+ // ConnectionString::connect returns NULL
+ MockReplicaSet* replSet = getReplSet();
+ replSet->kill(replSet->getSecondaries());
- /**
- * Warning: Tests running this fixture cannot be run in parallel with other tests
- * that uses ConnectionString::setConnectionHook
- */
- class ReplicaSetMonitorTest: public mongo::unittest::Test {
- protected:
- void setUp() {
- _replSet.reset(new MockReplicaSet("test", 3));
- _originalConnectionHook = ConnectionString::getConnectionHook();
- ConnectionString::setConnectionHook(
- mongo::MockConnRegistry::get()->getConnStrHook());
- }
+ // Create a monitor with primary as the only seed list and the two secondaries
+ // down so a NULL connection object will be stored for these secondaries in
+ // the _nodes vector.
+ const string replSetName(replSet->getSetName());
+ set<HostAndPort> seedList;
+ seedList.insert(HostAndPort(replSet->getPrimary()));
+ ReplicaSetMonitor::createIfNeeded(replSetName, seedList);
+
+ replSet->kill(replSet->getPrimary());
+
+ ReplicaSetMonitorPtr monitor = ReplicaSetMonitor::get(replSet->getSetName());
+ // Trigger calls to Node::getConnWithRefresh
+ monitor->startOrContinueRefresh().refreshAll();
+}
- void tearDown() {
- ConnectionString::setConnectionHook(_originalConnectionHook);
- ReplicaSetMonitor::cleanup();
- _replSet.reset();
- mongo::ScopedDbConnection::clearPool();
+namespace {
+/**
+ * Takes a ReplicaSetConfig and a node to remove and returns a new config with equivalent
+ * members minus the one specified to be removed. NOTE: Does not copy over properties of the
+ * members other than their id and host.
+ */
+ReplicaSetConfig _getConfigWithMemberRemoved(const ReplicaSetConfig& oldConfig,
+ const HostAndPort& toRemove) {
+ BSONObjBuilder newConfigBuilder;
+ newConfigBuilder.append("_id", oldConfig.getReplSetName());
+ newConfigBuilder.append("version", oldConfig.getConfigVersion());
+
+ BSONArrayBuilder membersBuilder(newConfigBuilder.subarrayStart("members"));
+ for (ReplicaSetConfig::MemberIterator member = oldConfig.membersBegin();
+ member != oldConfig.membersEnd();
+ ++member) {
+ if (member->getHostAndPort() == toRemove) {
+ continue;
}
- MockReplicaSet* getReplSet() {
- return _replSet.get();
+ membersBuilder.append(
+ BSON("_id" << member->getId() << "host" << member->getHostAndPort().toString()));
+ }
+
+ membersBuilder.done();
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(newConfigBuilder.obj()));
+ ASSERT_OK(newConfig.validate());
+ return newConfig;
+}
+} // namespace
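+// For reference, the helper above produces a config document of the form
+// { _id: <setName>, version: <n>, members: [ { _id: ..., host: ... }, ... ] },
+// with the removed member's entry omitted.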
+
+// Stress test for removing a node that was previously the primary from the set.
+// It cycles through configurations that place the primary at each position in
+// the host list returned by the isMaster command, verifying that the
+// ReplicaSetMonitor does not crash in any of these situations.
+TEST(ReplicaSetMonitorTest, PrimaryRemovedFromSetStress) {
+ const size_t NODE_COUNT = 5;
+ MockReplicaSet replSet("test", NODE_COUNT);
+ ConnectionString::ConnectionHook* originalConnHook = ConnectionString::getConnectionHook();
+ ConnectionString::setConnectionHook(mongo::MockConnRegistry::get()->getConnStrHook());
+
+ const string replSetName(replSet.getSetName());
+ set<HostAndPort> seedList;
+ seedList.insert(HostAndPort(replSet.getPrimary()));
+ ReplicaSetMonitor::createIfNeeded(replSetName, seedList);
+
+ const ReplicaSetConfig& origConfig = replSet.getReplConfig();
+ mongo::ReplicaSetMonitorPtr replMonitor = ReplicaSetMonitor::get(replSetName);
+
+ for (size_t idxToRemove = 0; idxToRemove < NODE_COUNT; idxToRemove++) {
+ replSet.setConfig(origConfig);
+ // Make sure the monitor sees the change
+ replMonitor->startOrContinueRefresh().refreshAll();
+
+ string hostToRemove;
+ {
+ BSONObjBuilder monitorStateBuilder;
+ replMonitor->appendInfo(monitorStateBuilder);
+ BSONObj monitorState = monitorStateBuilder.done();
+
+ BSONElement hostsElem = monitorState["hosts"];
+ BSONElement addrElem = hostsElem[mongo::str::stream() << idxToRemove]["addr"];
+ hostToRemove = addrElem.String();
}
- private:
- ConnectionString::ConnectionHook* _originalConnectionHook;
- boost::scoped_ptr<MockReplicaSet> _replSet;
- };
-
- TEST_F(ReplicaSetMonitorTest, SeedWithPriOnlySecDown) {
- // Test to make sure that the monitor doesn't crash when
- // ConnectionString::connect returns NULL
- MockReplicaSet* replSet = getReplSet();
- replSet->kill(replSet->getSecondaries());
-
- // Create a monitor with primary as the only seed list and the two secondaries
- // down so a NULL connection object will be stored for these secondaries in
- // the _nodes vector.
- const string replSetName(replSet->getSetName());
- set<HostAndPort> seedList;
- seedList.insert(HostAndPort(replSet->getPrimary()));
- ReplicaSetMonitor::createIfNeeded(replSetName, seedList);
-
- replSet->kill(replSet->getPrimary());
-
- ReplicaSetMonitorPtr monitor = ReplicaSetMonitor::get(replSet->getSetName());
- // Trigger calls to Node::getConnWithRefresh
- monitor->startOrContinueRefresh().refreshAll();
+ replSet.setPrimary(hostToRemove);
+ // Make sure the monitor sees the new primary
+ replMonitor->startOrContinueRefresh().refreshAll();
+
+ mongo::repl::ReplicaSetConfig newConfig =
+ _getConfigWithMemberRemoved(origConfig, HostAndPort(hostToRemove));
+ replSet.setConfig(newConfig);
+ replSet.setPrimary(newConfig.getMemberAt(0).getHostAndPort().toString());
+ // Force refresh -> should not crash
+ replMonitor->startOrContinueRefresh().refreshAll();
}
-namespace {
- /**
- * Takes a ReplicaSetConfig and a node to remove and returns a new config with equivalent
- * members minus the one specified to be removed. NOTE: Does not copy over properties of the
- * members other than their id and host.
- */
- ReplicaSetConfig _getConfigWithMemberRemoved(
- const ReplicaSetConfig& oldConfig, const HostAndPort& toRemove) {
- BSONObjBuilder newConfigBuilder;
+ ReplicaSetMonitor::cleanup();
+ ConnectionString::setConnectionHook(originalConnHook);
+ mongo::ScopedDbConnection::clearPool();
+}
+
+/**
+ * Warning: Tests running this fixture cannot be run in parallel with other tests
+ * that use ConnectionString::setConnectionHook.
+ */
+class TwoNodeWithTags : public mongo::unittest::Test {
+protected:
+ void setUp() {
+ _replSet.reset(new MockReplicaSet("test", 2));
+ _originalConnectionHook = ConnectionString::getConnectionHook();
+ ConnectionString::setConnectionHook(mongo::MockConnRegistry::get()->getConnStrHook());
+
+ mongo::repl::ReplicaSetConfig oldConfig = _replSet->getReplConfig();
+
+ mongo::BSONObjBuilder newConfigBuilder;
newConfigBuilder.append("_id", oldConfig.getReplSetName());
newConfigBuilder.append("version", oldConfig.getConfigVersion());
- BSONArrayBuilder membersBuilder(newConfigBuilder.subarrayStart("members"));
- for (ReplicaSetConfig::MemberIterator member = oldConfig.membersBegin();
- member != oldConfig.membersEnd(); ++member) {
- if (member->getHostAndPort() == toRemove) {
- continue;
- }
+ mongo::BSONArrayBuilder membersBuilder(newConfigBuilder.subarrayStart("members"));
+
+ {
+ const string host(_replSet->getPrimary());
+ const mongo::repl::MemberConfig* member =
+ oldConfig.findMemberByHostAndPort(HostAndPort(host));
+ membersBuilder.append(
+ BSON("_id" << member->getId() << "host" << host << "tags" << BSON("dc"
+ << "ny"
+ << "num"
+ << "1")));
+ }
- membersBuilder.append(BSON("_id" << member->getId() <<
- "host" << member->getHostAndPort().toString()));
+ {
+ const string host(_replSet->getSecondaries().front());
+ const mongo::repl::MemberConfig* member =
+ oldConfig.findMemberByHostAndPort(HostAndPort(host));
+ membersBuilder.append(
+ BSON("_id" << member->getId() << "host" << host << "tags" << BSON("dc"
+ << "ny"
+ << "num"
+ << "2")));
}
membersBuilder.done();
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(newConfigBuilder.obj()));
- ASSERT_OK(newConfig.validate());
- return newConfig;
+ mongo::repl::ReplicaSetConfig newConfig;
+ fassert(28572, newConfig.initialize(newConfigBuilder.done()));
+ fassert(28571, newConfig.validate());
+ _replSet->setConfig(newConfig);
}
-} // namespace
-
- // Stress test case for a node that is previously a primary being removed from the set.
- // This test goes through configurations with different positions for the primary node
- // in the host list returned from the isMaster command. The test here is to make sure
- // that the ReplicaSetMonitor will not crash under these situations.
- TEST(ReplicaSetMonitorTest, PrimaryRemovedFromSetStress) {
- const size_t NODE_COUNT = 5;
- MockReplicaSet replSet("test", NODE_COUNT);
- ConnectionString::ConnectionHook* originalConnHook =
- ConnectionString::getConnectionHook();
- ConnectionString::setConnectionHook(mongo::MockConnRegistry::get()->getConnStrHook());
-
- const string replSetName(replSet.getSetName());
- set<HostAndPort> seedList;
- seedList.insert(HostAndPort(replSet.getPrimary()));
- ReplicaSetMonitor::createIfNeeded(replSetName, seedList);
-
- const ReplicaSetConfig& origConfig = replSet.getReplConfig();
- mongo::ReplicaSetMonitorPtr replMonitor = ReplicaSetMonitor::get(replSetName);
-
- for (size_t idxToRemove = 0; idxToRemove < NODE_COUNT; idxToRemove++) {
-
- replSet.setConfig(origConfig);
- // Make sure the monitor sees the change
- replMonitor->startOrContinueRefresh().refreshAll();
-
- string hostToRemove;
- {
- BSONObjBuilder monitorStateBuilder;
- replMonitor->appendInfo(monitorStateBuilder);
- BSONObj monitorState = monitorStateBuilder.done();
-
- BSONElement hostsElem = monitorState["hosts"];
- BSONElement addrElem = hostsElem[mongo::str::stream() << idxToRemove]["addr"];
- hostToRemove = addrElem.String();
- }
-
- replSet.setPrimary(hostToRemove);
- // Make sure the monitor sees the new primary
- replMonitor->startOrContinueRefresh().refreshAll();
-
- mongo::repl::ReplicaSetConfig newConfig = _getConfigWithMemberRemoved(
- origConfig, HostAndPort(hostToRemove));
- replSet.setConfig(newConfig);
- replSet.setPrimary(newConfig.getMemberAt(0).getHostAndPort().toString());
- // Force refresh -> should not crash
- replMonitor->startOrContinueRefresh().refreshAll();
- }
+ void tearDown() {
+ ConnectionString::setConnectionHook(_originalConnectionHook);
ReplicaSetMonitor::cleanup();
- ConnectionString::setConnectionHook(originalConnHook);
- mongo::ScopedDbConnection::clearPool();
+ _replSet.reset();
}
- /**
- * Warning: Tests running this fixture cannot be run in parallel with other tests
- * that use ConnectionString::setConnectionHook.
- */
- class TwoNodeWithTags: public mongo::unittest::Test {
- protected:
- void setUp() {
- _replSet.reset(new MockReplicaSet("test", 2));
- _originalConnectionHook = ConnectionString::getConnectionHook();
- ConnectionString::setConnectionHook(
- mongo::MockConnRegistry::get()->getConnStrHook());
-
- mongo::repl::ReplicaSetConfig oldConfig = _replSet->getReplConfig();
-
- mongo::BSONObjBuilder newConfigBuilder;
- newConfigBuilder.append("_id", oldConfig.getReplSetName());
- newConfigBuilder.append("version", oldConfig.getConfigVersion());
-
- mongo::BSONArrayBuilder membersBuilder(newConfigBuilder.subarrayStart("members"));
-
- {
- const string host(_replSet->getPrimary());
- const mongo::repl::MemberConfig* member =
- oldConfig.findMemberByHostAndPort(HostAndPort(host));
- membersBuilder.append(BSON("_id" << member->getId() <<
- "host" << host <<
- "tags" << BSON("dc" << "ny" <<
- "num" << "1")));
- }
-
- {
- const string host(_replSet->getSecondaries().front());
- const mongo::repl::MemberConfig* member =
- oldConfig.findMemberByHostAndPort(HostAndPort(host));
- membersBuilder.append(BSON("_id" << member->getId() <<
- "host" << host <<
- "tags" << BSON("dc" << "ny" <<
- "num" << "2")));
- }
-
- membersBuilder.done();
- mongo::repl::ReplicaSetConfig newConfig;
- fassert(28572, newConfig.initialize(newConfigBuilder.done()));
- fassert(28571, newConfig.validate());
- _replSet->setConfig(newConfig);
-
- }
-
- void tearDown() {
- ConnectionString::setConnectionHook(_originalConnectionHook);
- ReplicaSetMonitor::cleanup();
- _replSet.reset();
- }
-
- MockReplicaSet* getReplSet() {
- return _replSet.get();
- }
+ MockReplicaSet* getReplSet() {
+ return _replSet.get();
+ }
- private:
- ConnectionString::ConnectionHook* _originalConnectionHook;
- boost::scoped_ptr<MockReplicaSet> _replSet;
- };
+private:
+ ConnectionString::ConnectionHook* _originalConnectionHook;
+ boost::scoped_ptr<MockReplicaSet> _replSet;
+};
- // Tests the case where the connection to secondary went bad and the replica set
- // monitor needs to perform a refresh of it's local view then retry the node selection
- // again after the refresh.
- TEST_F(TwoNodeWithTags, SecDownRetryNoTag) {
- MockReplicaSet* replSet = getReplSet();
+// Tests the case where the connection to secondary went bad and the replica set
+// monitor needs to perform a refresh of its local view, then retry the node selection
+// again after the refresh.
+TEST_F(TwoNodeWithTags, SecDownRetryNoTag) {
+ MockReplicaSet* replSet = getReplSet();
- set<HostAndPort> seedList;
- seedList.insert(HostAndPort(replSet->getPrimary()));
- ReplicaSetMonitor::createIfNeeded(replSet->getSetName(), seedList);
+ set<HostAndPort> seedList;
+ seedList.insert(HostAndPort(replSet->getPrimary()));
+ ReplicaSetMonitor::createIfNeeded(replSet->getSetName(), seedList);
- const string secHost(replSet->getSecondaries().front());
- replSet->kill(secHost);
+ const string secHost(replSet->getSecondaries().front());
+ replSet->kill(secHost);
- ReplicaSetMonitorPtr monitor = ReplicaSetMonitor::get(replSet->getSetName());
- // Make sure monitor sees the dead secondary
- monitor->startOrContinueRefresh().refreshAll();
+ ReplicaSetMonitorPtr monitor = ReplicaSetMonitor::get(replSet->getSetName());
+ // Make sure monitor sees the dead secondary
+ monitor->startOrContinueRefresh().refreshAll();
- replSet->restore(secHost);
+ replSet->restore(secHost);
- HostAndPort node = monitor->getHostOrRefresh(
- ReadPreferenceSetting(mongo::ReadPreference_SecondaryOnly, TagSet()));
+ HostAndPort node = monitor->getHostOrRefresh(
+ ReadPreferenceSetting(mongo::ReadPreference_SecondaryOnly, TagSet()));
- ASSERT_FALSE(monitor->isPrimary(node));
- ASSERT_EQUALS(secHost, node.toString());
- }
+ ASSERT_FALSE(monitor->isPrimary(node));
+ ASSERT_EQUALS(secHost, node.toString());
+}
- // Tests the case where the connection to secondary went bad and the replica set
- // monitor needs to perform a refresh of it's local view then retry the node selection
- // with tags again after the refresh.
- TEST_F(TwoNodeWithTags, SecDownRetryWithTag) {
- MockReplicaSet* replSet = getReplSet();
+// Tests the case where the connection to secondary went bad and the replica set
+// monitor needs to perform a refresh of its local view, then retry the node selection
+// with tags again after the refresh.
+TEST_F(TwoNodeWithTags, SecDownRetryWithTag) {
+ MockReplicaSet* replSet = getReplSet();
- set<HostAndPort> seedList;
- seedList.insert(HostAndPort(replSet->getPrimary()));
- ReplicaSetMonitor::createIfNeeded(replSet->getSetName(), seedList);
+ set<HostAndPort> seedList;
+ seedList.insert(HostAndPort(replSet->getPrimary()));
+ ReplicaSetMonitor::createIfNeeded(replSet->getSetName(), seedList);
- const string secHost(replSet->getSecondaries().front());
- replSet->kill(secHost);
+ const string secHost(replSet->getSecondaries().front());
+ replSet->kill(secHost);
- ReplicaSetMonitorPtr monitor = ReplicaSetMonitor::get(replSet->getSetName());
- // Make sure monitor sees the dead secondary
- monitor->startOrContinueRefresh().refreshAll();
+ ReplicaSetMonitorPtr monitor = ReplicaSetMonitor::get(replSet->getSetName());
+ // Make sure monitor sees the dead secondary
+ monitor->startOrContinueRefresh().refreshAll();
- replSet->restore(secHost);
+ replSet->restore(secHost);
- TagSet tags(BSON_ARRAY(BSON("dc" << "ny")));
- HostAndPort node = monitor->getHostOrRefresh(
- ReadPreferenceSetting(mongo::ReadPreference_SecondaryOnly, tags));
+ TagSet tags(BSON_ARRAY(BSON("dc"
+ << "ny")));
+ HostAndPort node =
+ monitor->getHostOrRefresh(ReadPreferenceSetting(mongo::ReadPreference_SecondaryOnly, tags));
- ASSERT_FALSE(monitor->isPrimary(node));
- ASSERT_EQUALS(secHost, node.toString());
- }
+ ASSERT_FALSE(monitor->isPrimary(node));
+ ASSERT_EQUALS(secHost, node.toString());
+}
}
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 420b3975bd6..763ffdcf30d 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -54,1465 +54,1468 @@ using namespace mongo::repl;
namespace ReplTests {
- using std::auto_ptr;
- using std::endl;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- BSONObj f( const char *s ) {
- return fromjson( s );
- }
-
- class Base {
- protected:
- repl::ReplicationCoordinator* _prevGlobGoordinator;
- mutable OperationContextImpl _txn;
- mutable DBDirectClient _client;
-
- public:
- Base() : _prevGlobGoordinator(getGlobalReplicationCoordinator())
- , _client(&_txn) {
- ReplSettings replSettings;
- replSettings.oplogSize = 5 * 1024 * 1024;
- replSettings.master = true;
- ReplicationCoordinatorMock* replCoord = new ReplicationCoordinatorMock(replSettings);
- setGlobalReplicationCoordinator(replCoord);
-
- oldRepl();
- createOplog(&_txn);
-
- Client::WriteContext ctx(&_txn, ns());
- WriteUnitOfWork wuow(&_txn);
-
- Collection* c = ctx.ctx().db()->getCollection(ns());
- if ( ! c ) {
- c = ctx.ctx().db()->createCollection(&_txn, ns());
- }
-
- ASSERT(c->getIndexCatalog()->haveIdIndex(&_txn));
- wuow.commit();
+using std::auto_ptr;
+using std::endl;
+using std::string;
+using std::stringstream;
+using std::vector;
+
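+// Shorthand: builds a BSONObj from a JSON string.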
+BSONObj f(const char* s) {
+ return fromjson(s);
+}
+
+class Base {
+protected:
+    repl::ReplicationCoordinator* _prevGlobCoordinator;
+ mutable OperationContextImpl _txn;
+ mutable DBDirectClient _client;
+
+public:
+    Base() : _prevGlobCoordinator(getGlobalReplicationCoordinator()), _client(&_txn) {
+ ReplSettings replSettings;
+ replSettings.oplogSize = 5 * 1024 * 1024;
+ replSettings.master = true;
+ ReplicationCoordinatorMock* replCoord = new ReplicationCoordinatorMock(replSettings);
+ setGlobalReplicationCoordinator(replCoord);
+
+ oldRepl();
+ createOplog(&_txn);
+
+ Client::WriteContext ctx(&_txn, ns());
+ WriteUnitOfWork wuow(&_txn);
+
+ Collection* c = ctx.ctx().db()->getCollection(ns());
+ if (!c) {
+ c = ctx.ctx().db()->createCollection(&_txn, ns());
}
- ~Base() {
- try {
- delete getGlobalReplicationCoordinator();
- setGlobalReplicationCoordinator(_prevGlobGoordinator);
- _prevGlobGoordinator = NULL;
-
- deleteAll( ns() );
- deleteAll( cllNS() );
- }
- catch ( ... ) {
- FAIL( "Exception while cleaning up test" );
- }
- }
- protected:
- static const char *ns() {
- return "unittests.repltests";
- }
- static const char *cllNS() {
- return "local.oplog.$main";
+
+ ASSERT(c->getIndexCatalog()->haveIdIndex(&_txn));
+ wuow.commit();
+ }
+ ~Base() {
+ try {
+ delete getGlobalReplicationCoordinator();
+            setGlobalReplicationCoordinator(_prevGlobCoordinator);
+            _prevGlobCoordinator = NULL;
+
+ deleteAll(ns());
+ deleteAll(cllNS());
+ } catch (...) {
+ FAIL("Exception while cleaning up test");
}
- BSONObj one( const BSONObj &query = BSONObj() ) const {
- return _client.findOne( ns(), query );
+ }
+
+protected:
+ static const char* ns() {
+ return "unittests.repltests";
+ }
+ static const char* cllNS() {
+ return "local.oplog.$main";
+ }
+ BSONObj one(const BSONObj& query = BSONObj()) const {
+ return _client.findOne(ns(), query);
+ }
+ void checkOne(const BSONObj& o) const {
+ check(o, one(o));
+ }
+ void checkAll(const BSONObj& o) const {
+ auto_ptr<DBClientCursor> c = _client.query(ns(), o);
+ verify(c->more());
+ while (c->more()) {
+ check(o, c->next());
}
- void checkOne( const BSONObj &o ) const {
- check( o, one( o ) );
+ }
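+    // Logs expected vs. actual before asserting, making mismatches easier to read.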
+ void check(const BSONObj& expected, const BSONObj& got) const {
+ if (expected.woCompare(got)) {
+ ::mongo::log() << "expected: " << expected.toString() << ", got: " << got.toString()
+ << endl;
}
- void checkAll( const BSONObj &o ) const {
- auto_ptr< DBClientCursor > c = _client.query( ns(), o );
- verify( c->more() );
- while( c->more() ) {
- check( o, c->next() );
- }
+ ASSERT_EQUALS(expected, got);
+ }
+ BSONObj oneOp() const {
+ return _client.findOne(cllNS(), BSONObj());
+ }
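+    // Counts the documents in the test collection, creating it first if needed.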
+ int count() const {
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::GlobalWrite lk(_txn.lockState());
+ Client::Context ctx(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ WriteUnitOfWork wunit(&_txn);
+ coll = db->createCollection(&_txn, ns());
+ wunit.commit();
}
- void check( const BSONObj &expected, const BSONObj &got ) const {
- if ( expected.woCompare( got ) ) {
- ::mongo::log() << "expected: " << expected.toString()
- << ", got: " << got.toString() << endl;
- }
- ASSERT_EQUALS( expected , got );
+
+ int count = 0;
+ RecordIterator* it = coll->getIterator(&_txn);
+ for (; !it->isEOF(); it->getNext()) {
+ ++count;
}
- BSONObj oneOp() const {
- return _client.findOne( cllNS(), BSONObj() );
+ delete it;
+ return count;
+ }
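+    // Counts the entries in the oplog collection, creating it first if needed.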
+ int opCount() {
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::GlobalWrite lk(_txn.lockState());
+ Client::Context ctx(&_txn, cllNS());
+
+ Database* db = ctx.db();
+ Collection* coll = db->getCollection(cllNS());
+ if (!coll) {
+ WriteUnitOfWork wunit(&_txn);
+ coll = db->createCollection(&_txn, cllNS());
+ wunit.commit();
}
- int count() const {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- Client::Context ctx(&_txn, ns() );
- Database* db = ctx.db();
- Collection* coll = db->getCollection( ns() );
- if ( !coll ) {
- WriteUnitOfWork wunit(&_txn);
- coll = db->createCollection( &_txn, ns() );
- wunit.commit();
- }
- int count = 0;
- RecordIterator* it = coll->getIterator(&_txn);
- for ( ; !it->isEOF(); it->getNext() ) {
- ++count;
- }
- delete it;
- return count;
+ int count = 0;
+ RecordIterator* it = coll->getIterator(&_txn);
+ for (; !it->isEOF(); it->getNext()) {
+ ++count;
}
- int opCount() {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- Client::Context ctx(&_txn, cllNS() );
-
+ delete it;
+ return count;
+ }
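+    // Reads every entry out of the oplog, then re-applies each one through
+    // ReplSource::applyOperation so tests can verify that replaying the oplog
+    // reproduces the expected collection state.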
+ void applyAllOperations() {
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::GlobalWrite lk(_txn.lockState());
+ vector<BSONObj> ops;
+ {
+ Client::Context ctx(&_txn, cllNS());
Database* db = ctx.db();
- Collection* coll = db->getCollection( cllNS() );
- if ( !coll ) {
- WriteUnitOfWork wunit(&_txn);
- coll = db->createCollection( &_txn, cllNS() );
- wunit.commit();
- }
+ Collection* coll = db->getCollection(cllNS());
- int count = 0;
RecordIterator* it = coll->getIterator(&_txn);
- for ( ; !it->isEOF(); it->getNext() ) {
- ++count;
+ while (!it->isEOF()) {
+ RecordId currLoc = it->getNext();
+ ops.push_back(coll->docFor(&_txn, currLoc).value());
}
delete it;
- return count;
}
- void applyAllOperations() {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- vector< BSONObj > ops;
- {
- Client::Context ctx(&_txn, cllNS() );
- Database* db = ctx.db();
- Collection* coll = db->getCollection( cllNS() );
-
- RecordIterator* it = coll->getIterator(&_txn);
- while ( !it->isEOF() ) {
- RecordId currLoc = it->getNext();
- ops.push_back(coll->docFor(&_txn, currLoc).value());
- }
- delete it;
- }
- {
- Client::Context ctx(&_txn, ns() );
- BSONObjBuilder b;
- b.append("host", "localhost");
- b.appendTimestamp("syncedTo", 0);
- ReplSource a(&_txn, b.obj());
- for( vector< BSONObj >::iterator i = ops.begin(); i != ops.end(); ++i ) {
- if ( 0 ) {
- mongo::unittest::log() << "op: " << *i << endl;
- }
- a.applyOperation( &_txn, ctx.db(), *i );
+ {
+ Client::Context ctx(&_txn, ns());
+ BSONObjBuilder b;
+ b.append("host", "localhost");
+ b.appendTimestamp("syncedTo", 0);
+ ReplSource a(&_txn, b.obj());
+ for (vector<BSONObj>::iterator i = ops.begin(); i != ops.end(); ++i) {
+ if (0) {
+ mongo::unittest::log() << "op: " << *i << endl;
}
+ a.applyOperation(&_txn, ctx.db(), *i);
}
}
- void printAll( const char *ns ) {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- Client::Context ctx(&_txn, ns );
+ }
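+    // Debugging helper: logs every document in the given collection.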
+ void printAll(const char* ns) {
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::GlobalWrite lk(_txn.lockState());
+ Client::Context ctx(&_txn, ns);
+
+ Database* db = ctx.db();
+ Collection* coll = db->getCollection(ns);
+ if (!coll) {
+ WriteUnitOfWork wunit(&_txn);
+ coll = db->createCollection(&_txn, ns);
+ wunit.commit();
+ }
- Database* db = ctx.db();
- Collection* coll = db->getCollection( ns );
- if ( !coll ) {
- WriteUnitOfWork wunit(&_txn);
- coll = db->createCollection( &_txn, ns );
- wunit.commit();
- }
+ RecordIterator* it = coll->getIterator(&_txn);
+ ::mongo::log() << "all for " << ns << endl;
+ while (!it->isEOF()) {
+ RecordId currLoc = it->getNext();
+ ::mongo::log() << coll->docFor(&_txn, currLoc).value().toString() << endl;
+ }
+ delete it;
+ }
+ // These deletes don't get logged.
+ void deleteAll(const char* ns) const {
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::GlobalWrite lk(_txn.lockState());
+ Client::Context ctx(&_txn, ns);
+ WriteUnitOfWork wunit(&_txn);
+ Database* db = ctx.db();
+ Collection* coll = db->getCollection(ns);
+ if (!coll) {
+ coll = db->createCollection(&_txn, ns);
+ }
- RecordIterator* it = coll->getIterator(&_txn);
- ::mongo::log() << "all for " << ns << endl;
- while ( !it->isEOF() ) {
- RecordId currLoc = it->getNext();
- ::mongo::log() << coll->docFor(&_txn, currLoc).value().toString() << endl;
- }
- delete it;
+ vector<RecordId> toDelete;
+ RecordIterator* it = coll->getIterator(&_txn);
+ while (!it->isEOF()) {
+ toDelete.push_back(it->getNext());
+ }
+ delete it;
+ for (vector<RecordId>::iterator i = toDelete.begin(); i != toDelete.end(); ++i) {
+ coll->deleteDocument(&_txn, *i, true);
+ }
+ wunit.commit();
+ }
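+    // Inserts directly into the collection (no oplog entry is written here),
+    // generating an _id when the document does not already carry one.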
+ void insert(const BSONObj& o) const {
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::GlobalWrite lk(_txn.lockState());
+ Client::Context ctx(&_txn, ns());
+ WriteUnitOfWork wunit(&_txn);
+ Database* db = ctx.db();
+ Collection* coll = db->getCollection(ns());
+ if (!coll) {
+ coll = db->createCollection(&_txn, ns());
}
- // These deletes don't get logged.
- void deleteAll( const char *ns ) const {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- Client::Context ctx(&_txn, ns );
- WriteUnitOfWork wunit(&_txn);
- Database* db = ctx.db();
- Collection* coll = db->getCollection( ns );
- if ( !coll ) {
- coll = db->createCollection( &_txn, ns );
- }
- vector< RecordId > toDelete;
- RecordIterator* it = coll->getIterator(&_txn);
- while ( !it->isEOF() ) {
- toDelete.push_back( it->getNext() );
- }
- delete it;
- for( vector< RecordId >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
- coll->deleteDocument( &_txn, *i, true );
- }
+ if (o.hasField("_id")) {
+ coll->insertDocument(&_txn, o, true);
wunit.commit();
+ return;
}
- void insert( const BSONObj &o ) const {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- Client::Context ctx(&_txn, ns() );
- WriteUnitOfWork wunit(&_txn);
- Database* db = ctx.db();
- Collection* coll = db->getCollection( ns() );
- if ( !coll ) {
- coll = db->createCollection( &_txn, ns() );
- }
- if ( o.hasField( "_id" ) ) {
- coll->insertDocument( &_txn, o, true );
- wunit.commit();
- return;
- }
+        BSONObjBuilder b;
+ OID id;
+ id.init();
+ b.appendOID("_id", &id);
+ b.appendElements(o);
+ coll->insertDocument(&_txn, b.obj(), true);
+ wunit.commit();
+ }
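+    // "With id": parses the JSON and prepends a freshly generated ObjectId _id.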
+ static BSONObj wid(const char* json) {
+        BSONObjBuilder b;
+ OID id;
+ id.init();
+ b.appendOID("_id", &id);
+ b.appendElements(fromjson(json));
+ return b.obj();
+ }
+};
- class BSONObjBuilder b;
- OID id;
- id.init();
- b.appendOID( "_id", &id );
- b.appendElements( o );
- coll->insertDocument( &_txn, b.obj(), true );
- wunit.commit();
- }
- static BSONObj wid( const char *json ) {
- class BSONObjBuilder b;
- OID id;
- id.init();
- b.appendOID( "_id", &id );
- b.appendElements( fromjson( json ) );
- return b.obj();
- }
- };
+class LogBasic : public Base {
+public:
+ void run() {
+ ASSERT_EQUALS(1, opCount());
+ _client.insert(ns(), fromjson("{\"a\":\"b\"}"));
+ ASSERT_EQUALS(2, opCount());
+ }
+};
+
+namespace Idempotence {
+
+class Base : public ReplTests::Base {
+public:
+ virtual ~Base() {}
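+    // Idempotency harness: performs the operation once, replays the oplog over
+    // the resulting state, then resets and replays twice more; the collection
+    // state and oplog length must be unchanged after every replay.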
+ void run() {
+ reset();
+ doIt();
+ int nOps = opCount();
+ check();
+ applyAllOperations();
+ check();
+ ASSERT_EQUALS(nOps, opCount());
+
+ reset();
+ applyAllOperations();
+ check();
+ ASSERT_EQUALS(nOps, opCount());
+ applyAllOperations();
+ check();
+ ASSERT_EQUALS(nOps, opCount());
+ }
- class LogBasic : public Base {
- public:
- void run() {
- ASSERT_EQUALS( 1, opCount() );
- _client.insert( ns(), fromjson( "{\"a\":\"b\"}" ) );
- ASSERT_EQUALS( 2, opCount() );
- }
- };
-
- namespace Idempotence {
-
- class Base : public ReplTests::Base {
- public:
- virtual ~Base() {}
- void run() {
- reset();
- doIt();
- int nOps = opCount();
- check();
- applyAllOperations();
- check();
- ASSERT_EQUALS( nOps, opCount() );
-
- reset();
- applyAllOperations();
- check();
- ASSERT_EQUALS( nOps, opCount() );
- applyAllOperations();
- check();
- ASSERT_EQUALS( nOps, opCount() );
- }
- protected:
- virtual void doIt() const = 0;
- virtual void check() const = 0;
- virtual void reset() const = 0;
- };
-
- class InsertTimestamp : public Base {
- public:
- void doIt() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- b.appendTimestamp( "t" );
- _client.insert( ns(), b.done() );
- date_ = _client.findOne( ns(), QUERY( "a" << 1 ) ).getField( "t" ).date();
- }
- void check() const {
- BSONObj o = _client.findOne( ns(), QUERY( "a" << 1 ) );
- ASSERT( 0 != o.getField( "t" ).date() );
- ASSERT_EQUALS( date_, o.getField( "t" ).date() );
- }
- void reset() const {
- deleteAll( ns() );
- }
- private:
- mutable Date_t date_;
- };
-
- class InsertAutoId : public Base {
- public:
- InsertAutoId() : o_( fromjson( "{\"a\":\"b\"}" ) ) {}
- void doIt() const {
- _client.insert( ns(), o_ );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- }
- void reset() const {
- deleteAll( ns() );
- }
- protected:
- BSONObj o_;
- };
-
- class InsertWithId : public InsertAutoId {
- public:
- InsertWithId() {
- o_ = fromjson( "{\"_id\":ObjectId(\"0f0f0f0f0f0f0f0f0f0f0f0f\"),\"a\":\"b\"}" );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( o_ );
- }
- };
-
- class InsertTwo : public Base {
- public:
- InsertTwo() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- t_( fromjson( "{'_id':2,c:'d'}" ) ) {}
- void doIt() const {
- vector< BSONObj > v;
- v.push_back( o_ );
- v.push_back( t_ );
- _client.insert( ns(), v );
- }
- void check() const {
- ASSERT_EQUALS( 2, count() );
- checkOne( o_ );
- checkOne( t_ );
- }
- void reset() const {
- deleteAll( ns() );
- }
- private:
- BSONObj o_;
- BSONObj t_;
- };
-
- class InsertTwoIdentical : public Base {
- public:
- InsertTwoIdentical() : o_( fromjson( "{\"a\":\"b\"}" ) ) {}
- void doIt() const {
- _client.insert( ns(), o_ );
- _client.insert( ns(), o_ );
- }
- void check() const {
- ASSERT_EQUALS( 2, count() );
- }
- void reset() const {
- deleteAll( ns() );
- }
- private:
- BSONObj o_;
- };
-
- class UpdateTimestamp : public Base {
- public:
- void doIt() const {
- BSONObjBuilder b;
- b.append( "_id", 1 );
- b.appendTimestamp( "t" );
- _client.update( ns(), BSON( "_id" << 1 ), b.done() );
- date_ = _client.findOne( ns(), QUERY( "_id" << 1 ) ).getField( "t" ).date();
- }
- void check() const {
- BSONObj o = _client.findOne( ns(), QUERY( "_id" << 1 ) );
- ASSERT( 0 != o.getField( "t" ).date() );
- ASSERT_EQUALS( date_, o.getField( "t" ).date() );
- }
- void reset() const {
- deleteAll( ns() );
- insert( BSON( "_id" << 1 ) );
- }
- private:
- mutable Date_t date_;
- };
-
- class UpdateSameField : public Base {
- public:
- UpdateSameField() :
- q_( fromjson( "{a:'b'}" ) ),
- o1_( wid( "{a:'b'}" ) ),
- o2_( wid( "{a:'b'}" ) ),
- u_( fromjson( "{a:'c'}" ) ) {}
- void doIt() const {
- _client.update( ns(), q_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 2, count() );
- ASSERT( !_client.findOne( ns(), q_ ).isEmpty() );
- ASSERT( !_client.findOne( ns(), u_ ).isEmpty() );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o1_ );
- insert( o2_ );
- }
- private:
- BSONObj q_, o1_, o2_, u_;
- };
-
- class UpdateSameFieldWithId : public Base {
- public:
- UpdateSameFieldWithId() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- q_( fromjson( "{a:'b'}" ) ),
- u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
- void doIt() const {
- _client.update( ns(), q_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 2, count() );
- ASSERT( !_client.findOne( ns(), q_ ).isEmpty() );
- ASSERT( !_client.findOne( ns(), u_ ).isEmpty() );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o_ );
- insert( fromjson( "{'_id':2,a:'b'}" ) );
- }
- private:
- BSONObj o_, q_, u_;
- };
-
- class UpdateSameFieldExplicitId : public Base {
- public:
- UpdateSameFieldExplicitId() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
- void doIt() const {
- _client.update( ns(), o_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( u_ );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o_ );
- }
- protected:
- BSONObj o_, u_;
- };
-
- class UpdateDifferentFieldExplicitId : public Base {
- public:
- UpdateDifferentFieldExplicitId() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- q_( fromjson( "{'_id':1}" ) ),
- u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
- void doIt() const {
- _client.update( ns(), q_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( u_ );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o_ );
- }
- protected:
- BSONObj o_, q_, u_;
- };
+protected:
+ virtual void doIt() const = 0;
+ virtual void check() const = 0;
+ virtual void reset() const = 0;
+};
+
+class InsertTimestamp : public Base {
+public:
+ void doIt() const {
+ BSONObjBuilder b;
+ b.append("a", 1);
+ b.appendTimestamp("t");
+ _client.insert(ns(), b.done());
+ date_ = _client.findOne(ns(), QUERY("a" << 1)).getField("t").date();
+ }
+ void check() const {
+ BSONObj o = _client.findOne(ns(), QUERY("a" << 1));
+ ASSERT(0 != o.getField("t").date());
+ ASSERT_EQUALS(date_, o.getField("t").date());
+ }
+ void reset() const {
+ deleteAll(ns());
+ }
- class UpsertUpdateNoMods : public UpdateDifferentFieldExplicitId {
- void doIt() const {
- _client.update( ns(), q_, u_, true );
- }
- };
+private:
+ mutable Date_t date_;
+};
- class UpsertInsertNoMods : public InsertAutoId {
- void doIt() const {
- _client.update( ns(), fromjson( "{a:'c'}" ), o_, true );
- }
- };
-
- class UpdateSet : public Base {
- public:
- UpdateSet() :
- o_( fromjson( "{'_id':1,a:5}" ) ),
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$set:{a:7}}" ) ),
- ou_( fromjson( "{'_id':1,a:7}" ) ) {}
- void doIt() const {
- _client.update( ns(), q_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( ou_ );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o_ );
- }
- protected:
- BSONObj o_, q_, u_, ou_;
- };
-
- class UpdateInc : public Base {
- public:
- UpdateInc() :
- o_( fromjson( "{'_id':1,a:5}" ) ),
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$inc:{a:3}}" ) ),
- ou_( fromjson( "{'_id':1,a:8}" ) ) {}
- void doIt() const {
- _client.update( ns(), q_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( ou_ );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o_ );
- }
- protected:
- BSONObj o_, q_, u_, ou_;
- };
-
- class UpdateInc2 : public Base {
- public:
- UpdateInc2() :
- o_( fromjson( "{'_id':1,a:5}" ) ),
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$inc:{a:3},$set:{x:5}}" ) ),
- ou_( fromjson( "{'_id':1,a:8,x:5}" ) ) {}
- void doIt() const {
- _client.update( ns(), q_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( ou_ );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o_ );
- }
- protected:
- BSONObj o_, q_, u_, ou_;
- };
-
- class IncEmbedded : public Base {
- public:
- IncEmbedded() :
- o_( fromjson( "{'_id':1,a:{b:3},b:{b:1}}" ) ),
- q_( fromjson( "{'_id':1}" ) ),
- u_( fromjson( "{$inc:{'a.b':1,'b.b':1}}" ) ),
- ou_( fromjson( "{'_id':1,a:{b:4},b:{b:2}}" ) )
- {}
- void doIt() const {
- _client.update( ns(), q_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( ou_ );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o_ );
- }
- protected:
- BSONObj o_, q_, u_, ou_;
- };
-
- class IncCreates : public Base {
- public:
- IncCreates() :
- o_( fromjson( "{'_id':1}" ) ),
- q_( fromjson( "{'_id':1}" ) ),
- u_( fromjson( "{$inc:{'a':1}}" ) ),
- ou_( fromjson( "{'_id':1,a:1}") )
- {}
- void doIt() const {
- _client.update( ns(), q_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( ou_ );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o_ );
- }
- protected:
- BSONObj o_, q_, u_, ou_;
- };
-
-
- class UpsertInsertIdMod : public Base {
- public:
- UpsertInsertIdMod() :
- q_( fromjson( "{'_id':5,a:4}" ) ),
- u_( fromjson( "{$inc:{a:3}}" ) ),
- ou_( fromjson( "{'_id':5,a:7}" ) ) {}
- void doIt() const {
- _client.update( ns(), q_, u_, true );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( ou_ );
- }
- void reset() const {
- deleteAll( ns() );
- }
- protected:
- BSONObj q_, u_, ou_;
- };
-
- class UpsertInsertSet : public Base {
- public:
- UpsertInsertSet() :
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$set:{a:7}}" ) ),
- ou_( fromjson( "{a:7}" ) ) {}
- void doIt() const {
- _client.update( ns(), q_, u_, true );
- }
- void check() const {
- ASSERT_EQUALS( 2, count() );
- ASSERT( !_client.findOne( ns(), ou_ ).isEmpty() );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':7,a:7}" ) );
- }
- protected:
- BSONObj o_, q_, u_, ou_;
- };
-
- class UpsertInsertInc : public Base {
- public:
- UpsertInsertInc() :
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$inc:{a:3}}" ) ),
- ou_( fromjson( "{a:8}" ) ) {}
- void doIt() const {
- _client.update( ns(), q_, u_, true );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- ASSERT( !_client.findOne( ns(), ou_ ).isEmpty() );
- }
- void reset() const {
- deleteAll( ns() );
- }
- protected:
- BSONObj o_, q_, u_, ou_;
- };
-
- class MultiInc : public Base {
- public:
-
- string s() const {
- stringstream ss;
- auto_ptr<DBClientCursor> cc = _client.query( ns() , Query().sort( BSON( "_id" << 1 ) ) );
- bool first = true;
- while ( cc->more() ) {
- if ( first ) first = false;
- else ss << ",";
-
- BSONObj o = cc->next();
- ss << o["x"].numberInt();
- }
- return ss.str();
- }
+class InsertAutoId : public Base {
+public:
+ InsertAutoId() : o_(fromjson("{\"a\":\"b\"}")) {}
+ void doIt() const {
+ _client.insert(ns(), o_);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ }
+ void reset() const {
+ deleteAll(ns());
+ }
- void doIt() const {
- _client.insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
- _client.insert( ns(), BSON( "_id" << 2 << "x" << 5 ) );
+protected:
+ BSONObj o_;
+};
- ASSERT_EQUALS( "1,5" , s() );
+class InsertWithId : public InsertAutoId {
+public:
+ InsertWithId() {
+ o_ = fromjson("{\"_id\":ObjectId(\"0f0f0f0f0f0f0f0f0f0f0f0f\"),\"a\":\"b\"}");
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(o_);
+ }
+};
+
+class InsertTwo : public Base {
+public:
+ InsertTwo() : o_(fromjson("{'_id':1,a:'b'}")), t_(fromjson("{'_id':2,c:'d'}")) {}
+ void doIt() const {
+ vector<BSONObj> v;
+ v.push_back(o_);
+ v.push_back(t_);
+ _client.insert(ns(), v);
+ }
+ void check() const {
+ ASSERT_EQUALS(2, count());
+ checkOne(o_);
+ checkOne(t_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ }
- _client.update( ns() , BSON( "_id" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
- ASSERT_EQUALS( "2,5" , s() );
+private:
+ BSONObj o_;
+ BSONObj t_;
+};
+
+class InsertTwoIdentical : public Base {
+public:
+ InsertTwoIdentical() : o_(fromjson("{\"a\":\"b\"}")) {}
+ void doIt() const {
+ _client.insert(ns(), o_);
+ _client.insert(ns(), o_);
+ }
+ void check() const {
+ ASSERT_EQUALS(2, count());
+ }
+ void reset() const {
+ deleteAll(ns());
+ }
- _client.update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) );
- ASSERT_EQUALS( "3,5" , s() );
+private:
+ BSONObj o_;
+};
+
+class UpdateTimestamp : public Base {
+public:
+ void doIt() const {
+ BSONObjBuilder b;
+ b.append("_id", 1);
+ b.appendTimestamp("t");
+ _client.update(ns(), BSON("_id" << 1), b.done());
+ date_ = _client.findOne(ns(), QUERY("_id" << 1)).getField("t").date();
+ }
+ void check() const {
+ BSONObj o = _client.findOne(ns(), QUERY("_id" << 1));
+ ASSERT(0 != o.getField("t").date());
+ ASSERT_EQUALS(date_, o.getField("t").date());
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(BSON("_id" << 1));
+ }
- _client.update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , false , true );
- check();
- }
+private:
+ mutable Date_t date_;
+};
+
+class UpdateSameField : public Base {
+public:
+ UpdateSameField()
+ : q_(fromjson("{a:'b'}")),
+ o1_(wid("{a:'b'}")),
+ o2_(wid("{a:'b'}")),
+ u_(fromjson("{a:'c'}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(2, count());
+ ASSERT(!_client.findOne(ns(), q_).isEmpty());
+ ASSERT(!_client.findOne(ns(), u_).isEmpty());
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o1_);
+ insert(o2_);
+ }
- void check() const {
- ASSERT_EQUALS( "4,6" , s() );
- }
+private:
+ BSONObj q_, o1_, o2_, u_;
+};
+
+class UpdateSameFieldWithId : public Base {
+public:
+ UpdateSameFieldWithId()
+ : o_(fromjson("{'_id':1,a:'b'}")),
+ q_(fromjson("{a:'b'}")),
+ u_(fromjson("{'_id':1,a:'c'}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(2, count());
+ ASSERT(!_client.findOne(ns(), q_).isEmpty());
+ ASSERT(!_client.findOne(ns(), u_).isEmpty());
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o_);
+ insert(fromjson("{'_id':2,a:'b'}"));
+ }
- void reset() const {
- deleteAll( ns() );
- }
- };
-
- class UpdateWithoutPreexistingId : public Base {
- public:
- UpdateWithoutPreexistingId() :
- o_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{a:5}" ) ),
- ot_( fromjson( "{b:4}" ) ) {}
- void doIt() const {
- _client.update( ns(), o_, u_ );
- }
- void check() const {
- ASSERT_EQUALS( 2, count() );
- checkOne( u_ );
- checkOne( ot_ );
- }
- void reset() const {
- deleteAll( ns() );
- insert( ot_ );
- insert( o_ );
- }
- protected:
- BSONObj o_, u_, ot_;
- };
-
- class Remove : public Base {
- public:
- Remove() :
- o1_( f( "{\"_id\":\"010101010101010101010101\",\"a\":\"b\"}" ) ),
- o2_( f( "{\"_id\":\"010101010101010101010102\",\"a\":\"b\"}" ) ),
- q_( f( "{\"a\":\"b\"}" ) ) {}
- void doIt() const {
- _client.remove( ns(), q_ );
- }
- void check() const {
- ASSERT_EQUALS( 0, count() );
- }
- void reset() const {
- deleteAll( ns() );
- insert( o1_ );
- insert( o2_ );
- }
- protected:
- BSONObj o1_, o2_, q_;
- };
+private:
+ BSONObj o_, q_, u_;
+};
- class RemoveOne : public Remove {
- void doIt() const {
- _client.remove( ns(), q_, true );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- }
- };
-
- class FailingUpdate : public Base {
- public:
- FailingUpdate() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- u_( fromjson( "{'_id':1,c:'d'}" ) ) {}
- void doIt() const {
- _client.update( ns(), o_, u_ );
- _client.insert( ns(), o_ );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( o_ );
- }
- void reset() const {
- deleteAll( ns() );
- }
- protected:
- BSONObj o_, u_;
- };
-
- class SetNumToStr : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), BSON( "$set" << BSON( "a" << "bcd" ) ) );
- }
- void check() const {
- ASSERT_EQUALS( 1, count() );
- checkOne( BSON( "_id" << 0 << "a" << "bcd" ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( BSON( "_id" << 0 << "a" << 4.0 ) );
- }
- };
+class UpdateSameFieldExplicitId : public Base {
+public:
+ UpdateSameFieldExplicitId()
+ : o_(fromjson("{'_id':1,a:'b'}")), u_(fromjson("{'_id':1,a:'c'}")) {}
+ void doIt() const {
+ _client.update(ns(), o_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(u_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o_);
+ }
- class Push : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
- };
+protected:
+ BSONObj o_, u_;
+};
+
+class UpdateDifferentFieldExplicitId : public Base {
+public:
+ UpdateDifferentFieldExplicitId()
+ : o_(fromjson("{'_id':1,a:'b'}")),
+ q_(fromjson("{'_id':1}")),
+ u_(fromjson("{'_id':1,a:'c'}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(u_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o_);
+ }
- class PushUpsert : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) ), true );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
- };
+protected:
+ BSONObj o_, q_, u_;
+};
- class MultiPush : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) << "$push" << BSON( "b.c" << 6.0 ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[4,5],b:{c:[6]}}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
- };
+class UpsertUpdateNoMods : public UpdateDifferentFieldExplicitId {
+ void doIt() const {
+ _client.update(ns(), q_, u_, true);
+ }
+};
- class EmptyPush : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[5]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0}" ) );
- }
- };
-
- class EmptyPushSparseIndex : public EmptyPush {
- public:
- EmptyPushSparseIndex() {
- _client.insert( "unittests.system.indexes",
- BSON( "ns" << ns() << "key" << BSON( "a" << 1 ) <<
- "name" << "foo" << "sparse" << true ) );
- }
- ~EmptyPushSparseIndex() {
- _client.dropIndexes( ns() );
- }
- };
+class UpsertInsertNoMods : public InsertAutoId {
+ void doIt() const {
+ _client.update(ns(), fromjson("{a:'c'}"), o_, true);
+ }
+};
+
+class UpdateSet : public Base {
+public:
+ UpdateSet()
+ : o_(fromjson("{'_id':1,a:5}")),
+ q_(fromjson("{a:5}")),
+ u_(fromjson("{$set:{a:7}}")),
+ ou_(fromjson("{'_id':1,a:7}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(ou_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o_);
+ }
- class PushAll : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[4,5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
- };
+protected:
+ BSONObj o_, q_, u_, ou_;
+};
+
+class UpdateInc : public Base {
+public:
+ UpdateInc()
+ : o_(fromjson("{'_id':1,a:5}")),
+ q_(fromjson("{a:5}")),
+ u_(fromjson("{$inc:{a:3}}")),
+ ou_(fromjson("{'_id':1,a:8}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(ou_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o_);
+ }
- class PushWithDollarSigns : public Base {
- void doIt() const {
- _client.update( ns(),
- BSON( "_id" << 0),
- BSON( "$push" << BSON( "a" << BSON( "$foo" << 1 ) ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0, a:[0, {'$foo':1}]}"), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( BSON( "_id" << 0 << "a" << BSON_ARRAY( 0 ) ) );
- }
- };
-
- class PushSlice : public Base {
- void doIt() const {
- _client.update( ns(),
- BSON( "_id" << 0),
- BSON( "$push" <<
- BSON( "a" <<
- BSON( "$each" << BSON_ARRAY(3) <<
- "$slice" << -2 ) ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0, a:[2,3]}"), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( BSON( "_id" << 0 << "a" << BSON_ARRAY( 1 << 2 ) ) );
- }
- };
-
- class PushSliceInitiallyInexistent : public Base {
- void doIt() const {
- _client.update( ns(),
- BSON( "_id" << 0),
- BSON( "$push" <<
- BSON( "a" <<
- BSON( "$each" << BSON_ARRAY(1<<2) <<
- "$slice" << -2 ) ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0, a:[1,2] }"), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( BSON( "_id" << 0 ) );
- }
- };
-
- class PushSliceToZero : public Base {
- void doIt() const {
- _client.update( ns(),
- BSON( "_id" << 0),
- BSON( "$push" <<
- BSON( "a" <<
- BSON( "$each" << BSON_ARRAY(3) <<
- "$slice" << 0 ) ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0, a:[]}"), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( BSON( "_id" << 0 ) );
- }
- };
+protected:
+ BSONObj o_, q_, u_, ou_;
+};
+
+class UpdateInc2 : public Base {
+public:
+ UpdateInc2()
+ : o_(fromjson("{'_id':1,a:5}")),
+ q_(fromjson("{a:5}")),
+ u_(fromjson("{$inc:{a:3},$set:{x:5}}")),
+ ou_(fromjson("{'_id':1,a:8,x:5}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(ou_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o_);
+ }
- class PushAllUpsert : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ), true );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[4,5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
- };
+protected:
+ BSONObj o_, q_, u_, ou_;
+};
+
+class IncEmbedded : public Base {
+public:
+ IncEmbedded()
+ : o_(fromjson("{'_id':1,a:{b:3},b:{b:1}}")),
+ q_(fromjson("{'_id':1}")),
+ u_(fromjson("{$inc:{'a.b':1,'b.b':1}}")),
+ ou_(fromjson("{'_id':1,a:{b:4},b:{b:2}}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(ou_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o_);
+ }
- class EmptyPushAll : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0}" ) );
- }
- };
+protected:
+ BSONObj o_, q_, u_, ou_;
+};
+
+class IncCreates : public Base {
+public:
+ IncCreates()
+ : o_(fromjson("{'_id':1}")),
+ q_(fromjson("{'_id':1}")),
+ u_(fromjson("{$inc:{'a':1}}")),
+ ou_(fromjson("{'_id':1,a:1}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(ou_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o_);
+ }
- class Pull : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), BSON( "$pull" << BSON( "a" << 4.0 ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[5]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4,5]}" ) );
- }
- };
+protected:
+ BSONObj o_, q_, u_, ou_;
+};
- class PullNothing : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), BSON( "$pull" << BSON( "a" << 6.0 ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4,5]}" ) );
- }
- };
- class PullAll : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$pullAll:{a:[4,5]}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[6]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
- }
- };
+class UpsertInsertIdMod : public Base {
+public:
+ UpsertInsertIdMod()
+ : q_(fromjson("{'_id':5,a:4}")),
+ u_(fromjson("{$inc:{a:3}}")),
+ ou_(fromjson("{'_id':5,a:7}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_, true);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(ou_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ }
- class Pop : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$pop:{a:1}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
- }
- };
+protected:
+ BSONObj q_, u_, ou_;
+};
- class PopReverse : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$pop:{a:-1}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0,a:[5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
- }
- };
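+// UpsertInsertSet expects two documents afterward: the pre-seeded {_id:7,a:7}
+// and a new document created by the upsert, located via findOne(ou_).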
+class UpsertInsertSet : public Base {
+public:
+ UpsertInsertSet()
+ : q_(fromjson("{a:5}")), u_(fromjson("{$set:{a:7}}")), ou_(fromjson("{a:7}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_, true);
+ }
+ void check() const {
+ ASSERT_EQUALS(2, count());
+ ASSERT(!_client.findOne(ns(), ou_).isEmpty());
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':7,a:7}"));
+ }
- class BitOp : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$bit:{a:{and:2,or:8}}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( BSON( "_id" << 0 << "a" << ( ( 3 & 2 ) | 8 ) ) , one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:3}" ) );
- }
- };
+protected:
+ BSONObj o_, q_, u_, ou_;
+};
- class Rename : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- ASSERT_EQUALS(
- mutablebson::unordered( BSON( "_id" << 0 << "a" << 50 << "b" << 3 ) ),
- mutablebson::unordered( one( fromjson( "{'_id':0}" ) ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:3}" ) );
- }
- };
+class UpsertInsertInc : public Base {
+public:
+ UpsertInsertInc()
+ : q_(fromjson("{a:5}")), u_(fromjson("{$inc:{a:3}}")), ou_(fromjson("{a:8}")) {}
+ void doIt() const {
+ _client.update(ns(), q_, u_, true);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ ASSERT(!_client.findOne(ns(), ou_).isEmpty());
+ }
+ void reset() const {
+ deleteAll(ns());
+ }
- class RenameReplace : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- ASSERT_EQUALS(
- mutablebson::unordered( BSON( "_id" << 0 << "a" << 50 << "b" << 3 ) ),
- mutablebson::unordered( one( fromjson( "{'_id':0}" ) ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:3,b:100}" ) );
- }
- };
+protected:
+ BSONObj o_, q_, u_, ou_;
+};
+
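+// MultiInc mixes inserts with single-document, implicit, and multi:true $inc
+// updates; s() renders the 'x' values in _id order so check() can compare the
+// whole collection state as one string.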
+class MultiInc : public Base {
+public:
+ string s() const {
+ stringstream ss;
+ auto_ptr<DBClientCursor> cc = _client.query(ns(), Query().sort(BSON("_id" << 1)));
+ bool first = true;
+ while (cc->more()) {
+ if (first)
+ first = false;
+ else
+ ss << ",";
+
+ BSONObj o = cc->next();
+ ss << o["x"].numberInt();
+ }
+ return ss.str();
+ }
- class RenameOverwrite : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- ASSERT_EQUALS(
- mutablebson::unordered( BSON( "_id" << 0 << "b" << 3 << "z" << 1 ) ),
- mutablebson::unordered( one( fromjson( "{'_id':0}" ) ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,z:1,a:3}" ) );
- }
- };
+ void doIt() const {
+ _client.insert(ns(), BSON("_id" << 1 << "x" << 1));
+ _client.insert(ns(), BSON("_id" << 2 << "x" << 5));
- class NoRename : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{c:'b'},$set:{z:1}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( BSON( "_id" << 0 << "a" << 3 << "z" << 1 ) , one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:3}" ) );
- }
- };
-
- class NestedNoRename : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ),
- fromjson( "{$rename:{'a.b':'c.d'},$set:{z:1}}"
- ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( BSON( "_id" << 0 << "z" << 1 ) , one( fromjson("{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0}" ) );
- }
- };
+ ASSERT_EQUALS("1,5", s());
- class SingletonNoRename : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSONObj(), fromjson("{$rename:{a:'b'}}" ) );
+ _client.update(ns(), BSON("_id" << 1), BSON("$inc" << BSON("x" << 1)));
+ ASSERT_EQUALS("2,5", s());
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{_id:0,z:1}" ), one(fromjson("{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,z:1}" ) );
- }
- };
+ _client.update(ns(), BSONObj(), BSON("$inc" << BSON("x" << 1)));
+ ASSERT_EQUALS("3,5", s());
- class IndexedSingletonNoRename : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSONObj(), fromjson("{$rename:{a:'b'}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{_id:0,z:1}" ), one(fromjson("{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- // Add an index on 'a'. This prevents the update from running 'in place'.
- ASSERT_OK(dbtests::createIndex( &_txn, ns(), BSON( "a" << 1 ) ));
- insert( fromjson( "{'_id':0,z:1}" ) );
- }
- };
+ _client.update(ns(), BSONObj(), BSON("$inc" << BSON("x" << 1)), false, true);
+ check();
+ }
- class AddToSetEmptyMissing : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSON( "_id" << 0 ), fromjson(
- "{$addToSet:{a:{$each:[]}}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{_id:0,a:[]}" ), one( fromjson("{'_id':0}" ) )
- );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0}" ) );
- }
- };
+ void check() const {
+ ASSERT_EQUALS("4,6", s());
+ }
- class AddToSetWithDollarSigns : public Base {
- void doIt() const {
- _client.update( ns(),
- BSON( "_id" << 0),
- BSON( "$addToSet" << BSON( "a" << BSON( "$foo" << 1 ) ) ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{'_id':0, a:[0, {'$foo':1}]}"), one( fromjson( "{'_id':0}" ) ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( BSON( "_id" << 0 << "a" << BSON_ARRAY( 0 ) ) );
- }
- };
-
- //
- // replay cases
- //
-
- class ReplaySetPreexistingNoOpPull : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSONObj(), fromjson( "{$unset:{z:1}}" ));
-
- // This is logged as {$set:{'a.b':[]},$set:{z:1}}, which might not be
- // replayable against future versions of a document (here {_id:0,a:1,z:1}) due
- // to SERVER-4781. As a result the $set:{z:1} will not be replayed in such
- // cases (and also an exception may abort replication). If this were instead
- // logged as {$set:{z:1}}, SERVER-4781 would not be triggered.
- _client.update( ns(), BSONObj(), fromjson( "{$pull:{'a.b':1}, $set:{z:1}}" ) );
- _client.update( ns(), BSONObj(), fromjson( "{$set:{a:1}}" ) );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{_id:0,a:1,z:1}" ), one( fromjson("{'_id':0}") ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:{b:[]},z:1}" ) );
- }
- };
+ void reset() const {
+ deleteAll(ns());
+ }
+};
+
+class UpdateWithoutPreexistingId : public Base {
+public:
+ UpdateWithoutPreexistingId()
+ : o_(fromjson("{a:5}")), u_(fromjson("{a:5}")), ot_(fromjson("{b:4}")) {}
+ void doIt() const {
+ _client.update(ns(), o_, u_);
+ }
+ void check() const {
+ ASSERT_EQUALS(2, count());
+ checkOne(u_);
+ checkOne(ot_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(ot_);
+ insert(o_);
+ }
- class ReplayArrayFieldNotAppended : public Base {
- public:
- void doIt() const {
- _client.update( ns(), BSONObj(), fromjson( "{$push:{'a.0.b':2}}" ) );
- _client.update( ns(), BSONObj(), fromjson( "{$set:{'a.0':1}}") );
- }
- using ReplTests::Base::check;
- void check() const {
- ASSERT_EQUALS( 1, count() );
- check( fromjson( "{_id:0,a:[1,{b:[1]}]}" ), one(fromjson("{'_id':0}") ) );
- }
- void reset() const {
- deleteAll( ns() );
- insert( fromjson( "{'_id':0,a:[{b:[0]},{b:[1]}]}" ) );
- }
- };
-
- } // namespace Idempotence
-
- class DeleteOpIsIdBased : public Base {
- public:
- void run() {
- insert( BSON( "_id" << 0 << "a" << 10 ) );
- insert( BSON( "_id" << 1 << "a" << 11 ) );
- insert( BSON( "_id" << 3 << "a" << 10 ) );
- _client.remove( ns(), BSON( "a" << 10 ) );
- ASSERT_EQUALS( 1U, _client.count( ns(), BSONObj() ) );
- insert( BSON( "_id" << 0 << "a" << 11 ) );
- insert( BSON( "_id" << 2 << "a" << 10 ) );
- insert( BSON( "_id" << 3 << "a" << 10 ) );
-
- applyAllOperations();
- ASSERT_EQUALS( 2U, _client.count( ns(), BSONObj() ) );
- ASSERT( !one( BSON( "_id" << 1 ) ).isEmpty() );
- ASSERT( !one( BSON( "_id" << 2 ) ).isEmpty() );
- }
- };
-
- class DatabaseIgnorerBasic {
- public:
- void run() {
- DatabaseIgnorer d;
- ASSERT( !d.ignoreAt( "a", OpTime( 4, 0 ) ) );
- d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
- ASSERT( d.ignoreAt( "a", OpTime( 4, 0 ) ) );
- ASSERT( !d.ignoreAt( "b", OpTime( 4, 0 ) ) );
- ASSERT( d.ignoreAt( "a", OpTime( 4, 10 ) ) );
- ASSERT( d.ignoreAt( "a", OpTime( 5, 0 ) ) );
- ASSERT( !d.ignoreAt( "a", OpTime( 5, 1 ) ) );
- // Ignore state is expired.
- ASSERT( !d.ignoreAt( "a", OpTime( 4, 0 ) ) );
- }
- };
-
- class DatabaseIgnorerUpdate {
- public:
- void run() {
- DatabaseIgnorer d;
- d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
- d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
- ASSERT( d.ignoreAt( "a", OpTime( 5, 5 ) ) );
- ASSERT( d.ignoreAt( "a", OpTime( 6, 0 ) ) );
- ASSERT( !d.ignoreAt( "a", OpTime( 6, 1 ) ) );
-
- d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
- d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
- d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
- d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
- ASSERT( d.ignoreAt( "a", OpTime( 5, 5 ) ) );
- ASSERT( d.ignoreAt( "a", OpTime( 6, 0 ) ) );
- ASSERT( !d.ignoreAt( "a", OpTime( 6, 1 ) ) );
- }
- };
-
- class SyncTest : public Sync {
- public:
- bool returnEmpty;
- SyncTest() : Sync(""), returnEmpty(false) {}
- virtual ~SyncTest() {}
- virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
- if (returnEmpty) {
- BSONObj o;
- return o;
- }
- return BSON("_id" << "on remote" << "foo" << "baz");
- }
- };
+protected:
+ BSONObj o_, u_, ot_;
+};
+
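+// Remove seeds two documents matching a non-_id query and expects the remove
+// to empty the collection; RemoveOne below passes justOne=true and expects a
+// single survivor.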
+class Remove : public Base {
+public:
+ Remove()
+ : o1_(f("{\"_id\":\"010101010101010101010101\",\"a\":\"b\"}")),
+ o2_(f("{\"_id\":\"010101010101010101010102\",\"a\":\"b\"}")),
+ q_(f("{\"a\":\"b\"}")) {}
+ void doIt() const {
+ _client.remove(ns(), q_);
+ }
+ void check() const {
+ ASSERT_EQUALS(0, count());
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(o1_);
+ insert(o2_);
+ }
- class ShouldRetry : public Base {
- public:
- void run() {
- bool threw = false;
- BSONObj o = BSON("ns" << ns() << "o" << BSON("foo" << "bar") << "o2" << BSON("_id" << "in oplog" << "foo" << "bar"));
+protected:
+ BSONObj o1_, o2_, q_;
+};
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
+class RemoveOne : public Remove {
+ void doIt() const {
+ _client.remove(ns(), q_, true);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ }
+};
+
+class FailingUpdate : public Base {
+public:
+ FailingUpdate() : o_(fromjson("{'_id':1,a:'b'}")), u_(fromjson("{'_id':1,c:'d'}")) {}
+ void doIt() const {
+ _client.update(ns(), o_, u_);
+ _client.insert(ns(), o_);
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(o_);
+ }
+ void reset() const {
+ deleteAll(ns());
+ }
- // this should fail because we can't connect
- try {
- Sync badSource("localhost:123");
+protected:
+ BSONObj o_, u_;
+};
+
+class SetNumToStr : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(),
+ BSON("_id" << 0),
+ BSON("$set" << BSON("a"
+ << "bcd")));
+ }
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ checkOne(BSON("_id" << 0 << "a"
+ << "bcd"));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(BSON("_id" << 0 << "a" << 4.0));
+ }
+};
- Client::Context ctx(&_txn, ns());
- badSource.getMissingDoc(&_txn, ctx.db(), o);
- }
- catch (DBException&) {
- threw = true;
- }
- verify(threw);
+class Push : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), BSON("$push" << BSON("a" << 5.0)));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[4,5]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4]}"));
+ }
+};
- // now this should succeed
- SyncTest t;
- verify(t.shouldRetry(&_txn, o));
- verify(!_client.findOne(ns(), BSON("_id" << "on remote")).isEmpty());
+class PushUpsert : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), BSON("$push" << BSON("a" << 5.0)), true);
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[4,5]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4]}"));
+ }
+};
+
+class MultiPush : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(),
+ BSON("_id" << 0),
+ BSON("$push" << BSON("a" << 5.0) << "$push" << BSON("b.c" << 6.0)));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[4,5],b:{c:[6]}}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4]}"));
+ }
+};
- // force it not to find an obj
- t.returnEmpty = true;
- verify(!t.shouldRetry(&_txn, o));
- }
- };
+class EmptyPush : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), BSON("$push" << BSON("a" << 5.0)));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[5]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0}"));
+ }
+};
+
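+// EmptyPushSparseIndex reruns EmptyPush with a sparse index on 'a' in place,
+// created by writing directly to unittests.system.indexes and dropped in the
+// destructor.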
+class EmptyPushSparseIndex : public EmptyPush {
+public:
+ EmptyPushSparseIndex() {
+ _client.insert("unittests.system.indexes",
+ BSON("ns" << ns() << "key" << BSON("a" << 1) << "name"
+ << "foo"
+ << "sparse" << true));
+ }
+ ~EmptyPushSparseIndex() {
+ _client.dropIndexes(ns());
+ }
+};
- class All : public Suite {
- public:
- All() : Suite( "repl" ) {
- }
+class PushAll : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$pushAll:{a:[5.0,6.0]}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[4,5,6]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4]}"));
+ }
+};
- void setupTests() {
- add< LogBasic >();
- add< Idempotence::InsertTimestamp >();
- add< Idempotence::InsertAutoId >();
- add< Idempotence::InsertWithId >();
- add< Idempotence::InsertTwo >();
- add< Idempotence::InsertTwoIdentical >();
- add< Idempotence::UpdateTimestamp >();
- add< Idempotence::UpdateSameField >();
- add< Idempotence::UpdateSameFieldWithId >();
- add< Idempotence::UpdateSameFieldExplicitId >();
- add< Idempotence::UpdateDifferentFieldExplicitId >();
- add< Idempotence::UpsertUpdateNoMods >();
- add< Idempotence::UpsertInsertNoMods >();
- add< Idempotence::UpdateSet >();
- add< Idempotence::UpdateInc >();
- add< Idempotence::UpdateInc2 >();
- add< Idempotence::IncEmbedded >(); // SERVER-716
- add< Idempotence::IncCreates >(); // SERVER-717
- add< Idempotence::UpsertInsertIdMod >();
- add< Idempotence::UpsertInsertSet >();
- add< Idempotence::UpsertInsertInc >();
- add< Idempotence::MultiInc >();
- // Don't worry about this until someone wants this functionality.
-// add< Idempotence::UpdateWithoutPreexistingId >();
- add< Idempotence::Remove >();
- add< Idempotence::RemoveOne >();
- add< Idempotence::FailingUpdate >();
- add< Idempotence::SetNumToStr >();
- add< Idempotence::Push >();
- add< Idempotence::PushUpsert >();
- add< Idempotence::MultiPush >();
- add< Idempotence::EmptyPush >();
- add< Idempotence::EmptyPushSparseIndex >();
- add< Idempotence::PushAll >();
- add< Idempotence::PushSlice >();
- add< Idempotence::PushSliceInitiallyInexistent >();
- add< Idempotence::PushSliceToZero >();
- add< Idempotence::PushAllUpsert >();
- add< Idempotence::EmptyPushAll >();
- add< Idempotence::Pull >();
- add< Idempotence::PullNothing >();
- add< Idempotence::PullAll >();
- add< Idempotence::Pop >();
- add< Idempotence::PopReverse >();
- add< Idempotence::BitOp >();
- add< Idempotence::Rename >();
- add< Idempotence::RenameReplace >();
- add< Idempotence::RenameOverwrite >();
- add< Idempotence::NoRename >();
- add< Idempotence::NestedNoRename >();
- add< Idempotence::SingletonNoRename >();
- add< Idempotence::IndexedSingletonNoRename >();
- add< Idempotence::AddToSetEmptyMissing >();
- add< Idempotence::ReplaySetPreexistingNoOpPull >();
- add< Idempotence::ReplayArrayFieldNotAppended >();
- add< DeleteOpIsIdBased >();
- add< DatabaseIgnorerBasic >();
- add< DatabaseIgnorerUpdate >();
- add< ShouldRetry >();
- }
- };
+class PushWithDollarSigns : public Base {
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), BSON("$push" << BSON("a" << BSON("$foo" << 1))));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0, a:[0, {'$foo':1}]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(BSON("_id" << 0 << "a" << BSON_ARRAY(0)));
+ }
+};
+
+class PushSlice : public Base {
+ void doIt() const {
+ _client.update(
+ ns(),
+ BSON("_id" << 0),
+ BSON("$push" << BSON("a" << BSON("$each" << BSON_ARRAY(3) << "$slice" << -2))));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0, a:[2,3]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(BSON("_id" << 0 << "a" << BSON_ARRAY(1 << 2)));
+ }
+};
+
+class PushSliceInitiallyInexistent : public Base {
+ void doIt() const {
+ _client.update(
+ ns(),
+ BSON("_id" << 0),
+ BSON("$push" << BSON("a" << BSON("$each" << BSON_ARRAY(1 << 2) << "$slice" << -2))));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0, a:[1,2] }"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(BSON("_id" << 0));
+ }
+};
+
+class PushSliceToZero : public Base {
+ void doIt() const {
+ _client.update(
+ ns(),
+ BSON("_id" << 0),
+ BSON("$push" << BSON("a" << BSON("$each" << BSON_ARRAY(3) << "$slice" << 0))));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0, a:[]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(BSON("_id" << 0));
+ }
+};
+
+class PushAllUpsert : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$pushAll:{a:[5.0,6.0]}}"), true);
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[4,5,6]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4]}"));
+ }
+};
+
+class EmptyPushAll : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$pushAll:{a:[5.0,6.0]}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[5,6]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0}"));
+ }
+};
+
+class Pull : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), BSON("$pull" << BSON("a" << 4.0)));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[5]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4,5]}"));
+ }
+};
+
+class PullNothing : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), BSON("$pull" << BSON("a" << 6.0)));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[4,5]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4,5]}"));
+ }
+};
+
+class PullAll : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$pullAll:{a:[4,5]}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[6]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4,5,6]}"));
+ }
+};
- SuiteInstance<All> myall;
+class Pop : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$pop:{a:1}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[4,5]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4,5,6]}"));
+ }
+};
+
+class PopReverse : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$pop:{a:-1}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0,a:[5,6]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[4,5,6]}"));
+ }
+};
+
+class BitOp : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$bit:{a:{and:2,or:8}}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(BSON("_id" << 0 << "a" << ((3 & 2) | 8)), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:3}"));
+ }
+};
+
+class Rename : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$rename:{a:'b'}}"));
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$set:{a:50}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ ASSERT_EQUALS(mutablebson::unordered(BSON("_id" << 0 << "a" << 50 << "b" << 3)),
+ mutablebson::unordered(one(fromjson("{'_id':0}"))));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:3}"));
+ }
+};
+
+class RenameReplace : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$rename:{a:'b'}}"));
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$set:{a:50}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ ASSERT_EQUALS(mutablebson::unordered(BSON("_id" << 0 << "a" << 50 << "b" << 3)),
+ mutablebson::unordered(one(fromjson("{'_id':0}"))));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:3,b:100}"));
+ }
+};
+
+class RenameOverwrite : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$rename:{a:'b'}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ ASSERT_EQUALS(mutablebson::unordered(BSON("_id" << 0 << "b" << 3 << "z" << 1)),
+ mutablebson::unordered(one(fromjson("{'_id':0}"))));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,z:1,a:3}"));
+ }
+};
+
+class NoRename : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$rename:{c:'b'},$set:{z:1}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(BSON("_id" << 0 << "a" << 3 << "z" << 1), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:3}"));
+ }
+};
+
+class NestedNoRename : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$rename:{'a.b':'c.d'},$set:{z:1}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(BSON("_id" << 0 << "z" << 1), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0}"));
+ }
+};
+
+class SingletonNoRename : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSONObj(), fromjson("{$rename:{a:'b'}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{_id:0,z:1}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,z:1}"));
+ }
+};
+
+class IndexedSingletonNoRename : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSONObj(), fromjson("{$rename:{a:'b'}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{_id:0,z:1}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ // Add an index on 'a'. This prevents the update from running 'in place'.
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1)));
+ insert(fromjson("{'_id':0,z:1}"));
+ }
+};
+
+class AddToSetEmptyMissing : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), fromjson("{$addToSet:{a:{$each:[]}}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{_id:0,a:[]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0}"));
+ }
+};
+
+class AddToSetWithDollarSigns : public Base {
+ void doIt() const {
+ _client.update(ns(), BSON("_id" << 0), BSON("$addToSet" << BSON("a" << BSON("$foo" << 1))));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{'_id':0, a:[0, {'$foo':1}]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(BSON("_id" << 0 << "a" << BSON_ARRAY(0)));
+ }
+};
+
+//
+// replay cases
+//
+
+class ReplaySetPreexistingNoOpPull : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSONObj(), fromjson("{$unset:{z:1}}"));
+
+ // This is logged as {$set:{'a.b':[]},$set:{z:1}}, which might not be
+ // replayable against future versions of a document (here {_id:0,a:1,z:1}) due
+ // to SERVER-4781. As a result the $set:{z:1} will not be replayed in such
+ // cases (and also an exception may abort replication). If this were instead
+ // logged as {$set:{z:1}}, SERVER-4781 would not be triggered.
+ _client.update(ns(), BSONObj(), fromjson("{$pull:{'a.b':1}, $set:{z:1}}"));
+ _client.update(ns(), BSONObj(), fromjson("{$set:{a:1}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{_id:0,a:1,z:1}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:{b:[]},z:1}"));
+ }
+};
+
+class ReplayArrayFieldNotAppended : public Base {
+public:
+ void doIt() const {
+ _client.update(ns(), BSONObj(), fromjson("{$push:{'a.0.b':2}}"));
+ _client.update(ns(), BSONObj(), fromjson("{$set:{'a.0':1}}"));
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS(1, count());
+ check(fromjson("{_id:0,a:[1,{b:[1]}]}"), one(fromjson("{'_id':0}")));
+ }
+ void reset() const {
+ deleteAll(ns());
+ insert(fromjson("{'_id':0,a:[{b:[0]},{b:[1]}]}"));
+ }
+};
+
+} // namespace Idempotence
+
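+// Deletes are logged per-document by _id: after re-inserting documents that
+// match the original query under different _ids, replaying the oplog must
+// remove only the _ids that were originally deleted.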
+class DeleteOpIsIdBased : public Base {
+public:
+ void run() {
+ insert(BSON("_id" << 0 << "a" << 10));
+ insert(BSON("_id" << 1 << "a" << 11));
+ insert(BSON("_id" << 3 << "a" << 10));
+ _client.remove(ns(), BSON("a" << 10));
+ ASSERT_EQUALS(1U, _client.count(ns(), BSONObj()));
+ insert(BSON("_id" << 0 << "a" << 11));
+ insert(BSON("_id" << 2 << "a" << 10));
+ insert(BSON("_id" << 3 << "a" << 10));
+
+ applyAllOperations();
+ ASSERT_EQUALS(2U, _client.count(ns(), BSONObj()));
+ ASSERT(!one(BSON("_id" << 1)).isEmpty());
+ ASSERT(!one(BSON("_id" << 2)).isEmpty());
+ }
+};
+
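+// DatabaseIgnorer: doIgnoreUntilAfter(db, t) makes ignoreAt(db, x) true for
+// optimes up to and including t; once queried past t the state expires.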
+class DatabaseIgnorerBasic {
+public:
+ void run() {
+ DatabaseIgnorer d;
+ ASSERT(!d.ignoreAt("a", OpTime(4, 0)));
+ d.doIgnoreUntilAfter("a", OpTime(5, 0));
+ ASSERT(d.ignoreAt("a", OpTime(4, 0)));
+ ASSERT(!d.ignoreAt("b", OpTime(4, 0)));
+ ASSERT(d.ignoreAt("a", OpTime(4, 10)));
+ ASSERT(d.ignoreAt("a", OpTime(5, 0)));
+ ASSERT(!d.ignoreAt("a", OpTime(5, 1)));
+ // The ignore state has expired, so the earlier optime is no longer ignored.
+ ASSERT(!d.ignoreAt("a", OpTime(4, 0)));
+ }
+};
+
+class DatabaseIgnorerUpdate {
+public:
+ void run() {
+ DatabaseIgnorer d;
+ d.doIgnoreUntilAfter("a", OpTime(5, 0));
+ d.doIgnoreUntilAfter("a", OpTime(6, 0));
+ ASSERT(d.ignoreAt("a", OpTime(5, 5)));
+ ASSERT(d.ignoreAt("a", OpTime(6, 0)));
+ ASSERT(!d.ignoreAt("a", OpTime(6, 1)));
+
+ d.doIgnoreUntilAfter("a", OpTime(5, 0));
+ d.doIgnoreUntilAfter("a", OpTime(6, 0));
+ d.doIgnoreUntilAfter("a", OpTime(6, 0));
+ d.doIgnoreUntilAfter("a", OpTime(5, 0));
+ ASSERT(d.ignoreAt("a", OpTime(5, 5)));
+ ASSERT(d.ignoreAt("a", OpTime(6, 0)));
+ ASSERT(!d.ignoreAt("a", OpTime(6, 1)));
+ }
+};
+
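+// SyncTest stubs Sync::getMissingDoc so the test controls what the "remote"
+// returns: a fixed document by default, or nothing when returnEmpty is set.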
+class SyncTest : public Sync {
+public:
+ bool returnEmpty;
+ SyncTest() : Sync(""), returnEmpty(false) {}
+ virtual ~SyncTest() {}
+ virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
+ if (returnEmpty) {
+ BSONObj o;
+ return o;
+ }
+ return BSON("_id"
+ << "on remote"
+ << "foo"
+ << "baz");
+ }
+};
+
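+// ShouldRetry covers three paths: getMissingDoc against an unreachable sync
+// source throws, a stubbed fetch lets shouldRetry succeed and store the
+// fetched document locally, and an empty fetch makes shouldRetry return false.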
+class ShouldRetry : public Base {
+public:
+ void run() {
+ bool threw = false;
+ BSONObj o = BSON("ns" << ns() << "o" << BSON("foo"
+ << "bar") << "o2" << BSON("_id"
+ << "in oplog"
+ << "foo"
+ << "bar"));
+
+ ScopedTransaction transaction(&_txn, MODE_X);
+ Lock::GlobalWrite lk(_txn.lockState());
+
+ // This should fail because the sync source at localhost:123 is unreachable.
+ try {
+ Sync badSource("localhost:123");
+
+ Client::Context ctx(&_txn, ns());
+ badSource.getMissingDoc(&_txn, ctx.db(), o);
+ } catch (DBException&) {
+ threw = true;
+ }
+ verify(threw);
+
+ // Now shouldRetry should succeed; SyncTest::getMissingDoc supplies the document.
+ SyncTest t;
+ verify(t.shouldRetry(&_txn, o));
+ verify(!_client.findOne(ns(),
+ BSON("_id"
+ << "on remote")).isEmpty());
+
+ // Force getMissingDoc to return an empty object so shouldRetry reports failure.
+ t.returnEmpty = true;
+ verify(!t.shouldRetry(&_txn, o));
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("repl") {}
+
+ void setupTests() {
+ add<LogBasic>();
+ add<Idempotence::InsertTimestamp>();
+ add<Idempotence::InsertAutoId>();
+ add<Idempotence::InsertWithId>();
+ add<Idempotence::InsertTwo>();
+ add<Idempotence::InsertTwoIdentical>();
+ add<Idempotence::UpdateTimestamp>();
+ add<Idempotence::UpdateSameField>();
+ add<Idempotence::UpdateSameFieldWithId>();
+ add<Idempotence::UpdateSameFieldExplicitId>();
+ add<Idempotence::UpdateDifferentFieldExplicitId>();
+ add<Idempotence::UpsertUpdateNoMods>();
+ add<Idempotence::UpsertInsertNoMods>();
+ add<Idempotence::UpdateSet>();
+ add<Idempotence::UpdateInc>();
+ add<Idempotence::UpdateInc2>();
+ add<Idempotence::IncEmbedded>(); // SERVER-716
+ add<Idempotence::IncCreates>(); // SERVER-717
+ add<Idempotence::UpsertInsertIdMod>();
+ add<Idempotence::UpsertInsertSet>();
+ add<Idempotence::UpsertInsertInc>();
+ add<Idempotence::MultiInc>();
+ // Don't worry about this until someone wants this functionality.
+ // add< Idempotence::UpdateWithoutPreexistingId >();
+ add<Idempotence::Remove>();
+ add<Idempotence::RemoveOne>();
+ add<Idempotence::FailingUpdate>();
+ add<Idempotence::SetNumToStr>();
+ add<Idempotence::Push>();
+ add<Idempotence::PushUpsert>();
+ add<Idempotence::MultiPush>();
+ add<Idempotence::EmptyPush>();
+ add<Idempotence::EmptyPushSparseIndex>();
+ add<Idempotence::PushAll>();
+ add<Idempotence::PushSlice>();
+ add<Idempotence::PushSliceInitiallyInexistent>();
+ add<Idempotence::PushSliceToZero>();
+ add<Idempotence::PushAllUpsert>();
+ add<Idempotence::EmptyPushAll>();
+ add<Idempotence::Pull>();
+ add<Idempotence::PullNothing>();
+ add<Idempotence::PullAll>();
+ add<Idempotence::Pop>();
+ add<Idempotence::PopReverse>();
+ add<Idempotence::BitOp>();
+ add<Idempotence::Rename>();
+ add<Idempotence::RenameReplace>();
+ add<Idempotence::RenameOverwrite>();
+ add<Idempotence::NoRename>();
+ add<Idempotence::NestedNoRename>();
+ add<Idempotence::SingletonNoRename>();
+ add<Idempotence::IndexedSingletonNoRename>();
+ add<Idempotence::AddToSetEmptyMissing>();
+ add<Idempotence::ReplaySetPreexistingNoOpPull>();
+ add<Idempotence::ReplayArrayFieldNotAppended>();
+ add<DeleteOpIsIdBased>();
+ add<DatabaseIgnorerBasic>();
+ add<DatabaseIgnorerUpdate>();
+ add<ShouldRetry>();
+ }
+};
-} // namespace ReplTests
+SuiteInstance<All> myall;
+} // namespace ReplTests
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index b28ce38755b..98d89dacfd8 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -48,714 +48,697 @@ using std::string;
namespace RollbackTests {
namespace {
- void dropDatabase( OperationContext* txn, const NamespaceString& nss ) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock( txn->lockState() );
- Database* db = dbHolder().get( txn, nss.db() );
+void dropDatabase(OperationContext* txn, const NamespaceString& nss) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn->lockState());
+ Database* db = dbHolder().get(txn, nss.db());
- if ( db ) {
- dropDatabase( txn, db );
- }
- }
- bool collectionExists( Client::Context* ctx, const string& ns ) {
- const DatabaseCatalogEntry* dbEntry = ctx->db()->getDatabaseCatalogEntry();
- list<string> names;
- dbEntry->getCollectionNamespaces( &names );
- return std::find( names.begin(), names.end(), ns ) != names.end();
- }
- void createCollection( OperationContext* txn, const NamespaceString& nss ) {
- ScopedTransaction transaction( txn, MODE_IX );
- Lock::DBLock dbXLock( txn->lockState(), nss.db(), MODE_X );
- Client::Context ctx( txn, nss.ns() );
- {
- WriteUnitOfWork uow( txn );
- ASSERT( !collectionExists( &ctx, nss.ns() ) );
- ASSERT_OK( userCreateNS( txn, ctx.db(), nss.ns(), BSONObj(), false, false ) );
- ASSERT( collectionExists( &ctx, nss.ns() ) );
- uow.commit();
- }
- }
- Status renameCollection( OperationContext* txn,
- const NamespaceString& source,
- const NamespaceString& target ) {
- ASSERT_EQ( source.db(), target.db() );
- Database* db = dbHolder().get( txn, source.db() );
- return db->renameCollection( txn, source.ns(), target.ns(), false );
+ if (db) {
+ dropDatabase(txn, db);
}
- Status truncateCollection( OperationContext* txn, const NamespaceString& nss ) {
- Collection* coll = dbHolder().get( txn, nss.db() )->getCollection(nss.ns() );
- return coll->truncate( txn );
+}
+bool collectionExists(Client::Context* ctx, const string& ns) {
+ const DatabaseCatalogEntry* dbEntry = ctx->db()->getDatabaseCatalogEntry();
+ list<string> names;
+ dbEntry->getCollectionNamespaces(&names);
+ return std::find(names.begin(), names.end(), ns) != names.end();
+}
+void createCollection(OperationContext* txn, const NamespaceString& nss) {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), nss.db(), MODE_X);
+ Client::Context ctx(txn, nss.ns());
+ {
+ WriteUnitOfWork uow(txn);
+ ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT_OK(userCreateNS(txn, ctx.db(), nss.ns(), BSONObj(), false, false));
+ ASSERT(collectionExists(&ctx, nss.ns()));
+ uow.commit();
}
- RecordId insertRecord( OperationContext* txn,
+}
+Status renameCollection(OperationContext* txn,
+ const NamespaceString& source,
+ const NamespaceString& target) {
+ ASSERT_EQ(source.db(), target.db());
+ Database* db = dbHolder().get(txn, source.db());
+ return db->renameCollection(txn, source.ns(), target.ns(), false);
+}
+Status truncateCollection(OperationContext* txn, const NamespaceString& nss) {
+ Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+ return coll->truncate(txn);
+}
+RecordId insertRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) {
+ Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+ StatusWith<RecordId> status = coll->insertDocument(txn, data, false);
+ ASSERT_OK(status.getStatus());
+ return status.getValue();
+}
+void assertOnlyRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) {
+ Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+ scoped_ptr<RecordIterator> iter(coll->getIterator(txn));
+ ASSERT(!iter->isEOF());
+ RecordId loc = iter->getNext();
+ ASSERT(iter->isEOF());
+ ASSERT_EQ(data, coll->docFor(txn, loc).value());
+}
+void assertEmpty(OperationContext* txn, const NamespaceString& nss) {
+ Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+ scoped_ptr<RecordIterator> iter(coll->getIterator(txn));
+ ASSERT(iter->isEOF());
+}
+bool indexExists(OperationContext* txn, const NamespaceString& nss, const string& idxName) {
+ Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+ return coll->getIndexCatalog()->findIndexByName(txn, idxName, true) != NULL;
+}
+bool indexReady(OperationContext* txn, const NamespaceString& nss, const string& idxName) {
+ Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+ return coll->getIndexCatalog()->findIndexByName(txn, idxName, false) != NULL;
+}
+size_t getNumIndexEntries(OperationContext* txn,
const NamespaceString& nss,
- const BSONObj& data ) {
- Collection* coll = dbHolder().get( txn, nss.db() )->getCollection(nss.ns() );
- StatusWith<RecordId> status = coll->insertDocument( txn, data, false );
- ASSERT_OK( status.getStatus() );
- return status.getValue();
- }
- void assertOnlyRecord( OperationContext* txn,
- const NamespaceString& nss,
- const BSONObj& data ) {
- Collection* coll = dbHolder().get( txn, nss.db() )->getCollection(nss.ns() );
- scoped_ptr<RecordIterator> iter( coll->getIterator( txn ) );
- ASSERT( !iter->isEOF() );
- RecordId loc = iter->getNext();
- ASSERT( iter->isEOF() );
- ASSERT_EQ( data, coll->docFor( txn, loc ).value() );
- }
- void assertEmpty( OperationContext* txn, const NamespaceString& nss ) {
- Collection* coll = dbHolder().get( txn, nss.db() )->getCollection(nss.ns() );
- scoped_ptr<RecordIterator> iter( coll->getIterator( txn ) );
- ASSERT( iter->isEOF() );
- }
- bool indexExists( OperationContext* txn, const NamespaceString& nss, const string& idxName ) {
- Collection* coll = dbHolder().get( txn, nss.db() )->getCollection(nss.ns() );
- return coll->getIndexCatalog()->findIndexByName( txn, idxName, true ) != NULL;
- }
- bool indexReady( OperationContext* txn, const NamespaceString& nss, const string& idxName ) {
- Collection* coll = dbHolder().get( txn, nss.db() )->getCollection(nss.ns() );
- return coll->getIndexCatalog()->findIndexByName( txn, idxName, false ) != NULL;
- }
- size_t getNumIndexEntries( OperationContext* txn,
- const NamespaceString& nss,
- const string& idxName ) {
- size_t numEntries = 0;
+ const string& idxName) {
+ size_t numEntries = 0;
- Collection* coll = dbHolder().get( txn, nss.db() )->getCollection(nss.ns() );
- IndexCatalog* catalog = coll->getIndexCatalog();
- IndexDescriptor* desc = catalog->findIndexByName( txn, idxName, false );
+ Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+ IndexCatalog* catalog = coll->getIndexCatalog();
+ IndexDescriptor* desc = catalog->findIndexByName(txn, idxName, false);
- if ( desc ) {
- CursorOptions cursorOptions;
- cursorOptions.direction = CursorOptions::INCREASING;
+ if (desc) {
+ CursorOptions cursorOptions;
+ cursorOptions.direction = CursorOptions::INCREASING;
- IndexCursor *cursor;
- ASSERT_OK( catalog->getIndex( desc )->newCursor( txn, cursorOptions, &cursor ) );
- ASSERT_OK( cursor->seek( minKey ) );
+ IndexCursor* cursor;
+ ASSERT_OK(catalog->getIndex(desc)->newCursor(txn, cursorOptions, &cursor));
+ ASSERT_OK(cursor->seek(minKey));
- while ( !cursor->isEOF() ) {
- numEntries++;
- cursor->next();
- }
- delete cursor;
+ while (!cursor->isEOF()) {
+ numEntries++;
+ cursor->next();
}
-
- return numEntries;
+ delete cursor;
}
- void dropIndex( OperationContext* txn, const NamespaceString& nss, const string& idxName ) {
- Collection* coll = dbHolder().get( txn, nss.db() )->getCollection(nss.ns() );
- IndexDescriptor* desc = coll->getIndexCatalog()->findIndexByName( txn, idxName );
- ASSERT( desc );
- ASSERT_OK( coll->getIndexCatalog()->dropIndex( txn, desc ) );
- }
-} // namespace
-
- template<bool rollback, bool defaultIndexes>
- class CreateCollection {
- public:
- void run() {
- string ns = "unittests.rollback_create_collection";
- OperationContextImpl txn;
- NamespaceString nss( ns );
- dropDatabase( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock( txn.lockState(), nss.db(), MODE_X );
- Client::Context ctx( &txn, ns );
- {
- WriteUnitOfWork uow( &txn );
- ASSERT( !collectionExists( &ctx, ns ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), ns, BSONObj(), false, defaultIndexes ) );
- ASSERT( collectionExists( &ctx, ns ) );
- if ( !rollback ) {
- uow.commit();
- }
- }
- if ( rollback ) {
- ASSERT( !collectionExists( &ctx, ns ) );
- }
- else {
- ASSERT( collectionExists( &ctx, ns ) );
+
+ return numEntries;
+}
+void dropIndex(OperationContext* txn, const NamespaceString& nss, const string& idxName) {
+ Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+ IndexDescriptor* desc = coll->getIndexCatalog()->findIndexByName(txn, idxName);
+ ASSERT(desc);
+ ASSERT_OK(coll->getIndexCatalog()->dropIndex(txn, desc));
+}
+} // namespace
+
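+// Each rollback test mutates the catalog inside a WriteUnitOfWork; when the
+// 'rollback' template parameter is true it skips uow.commit() so the change
+// rolls back, and the final asserts verify both outcomes.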
+template <bool rollback, bool defaultIndexes>
+class CreateCollection {
+public:
+ void run() {
+ string ns = "unittests.rollback_create_collection";
+ OperationContextImpl txn;
+ NamespaceString nss(ns);
+ dropDatabase(&txn, nss);
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
+ Client::Context ctx(&txn, ns);
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT(!collectionExists(&ctx, ns));
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), ns, BSONObj(), false, defaultIndexes));
+ ASSERT(collectionExists(&ctx, ns));
+ if (!rollback) {
+ uow.commit();
}
}
- };
-
- template<bool rollback, bool defaultIndexes>
- class DropCollection {
- public:
- void run() {
- string ns = "unittests.rollback_drop_collection";
- OperationContextImpl txn;
- NamespaceString nss( ns );
- dropDatabase( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock( txn.lockState(), nss.db(), MODE_X );
- Client::Context ctx( &txn, ns );
- {
- WriteUnitOfWork uow( &txn );
- ASSERT( !collectionExists( &ctx, ns ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), ns, BSONObj(), false, defaultIndexes ) );
+ if (rollback) {
+ ASSERT(!collectionExists(&ctx, ns));
+ } else {
+ ASSERT(collectionExists(&ctx, ns));
+ }
+ }
+};
+
+template <bool rollback, bool defaultIndexes>
+class DropCollection {
+public:
+ void run() {
+ string ns = "unittests.rollback_drop_collection";
+ OperationContextImpl txn;
+ NamespaceString nss(ns);
+ dropDatabase(&txn, nss);
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
+ Client::Context ctx(&txn, ns);
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT(!collectionExists(&ctx, ns));
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), ns, BSONObj(), false, defaultIndexes));
+ uow.commit();
+ }
+ ASSERT(collectionExists(&ctx, ns));
+
+ // END OF SETUP / START OF TEST
+
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT(collectionExists(&ctx, ns));
+ ASSERT_OK(ctx.db()->dropCollection(&txn, ns));
+ ASSERT(!collectionExists(&ctx, ns));
+ if (!rollback) {
uow.commit();
}
- ASSERT( collectionExists( &ctx, ns ) );
-
- // END OF SETUP / START OF TEST
-
- {
- WriteUnitOfWork uow( &txn );
- ASSERT( collectionExists( &ctx, ns ) );
- ASSERT_OK( ctx.db()->dropCollection( &txn, ns ) );
- ASSERT( !collectionExists( &ctx, ns ) );
- if ( !rollback ) {
- uow.commit();
- }
- }
- if ( rollback ) {
- ASSERT( collectionExists( &ctx, ns ) );
- }
- else {
- ASSERT( !collectionExists( &ctx, ns ) );
- }
}
- };
-
- template<bool rollback, bool defaultIndexes>
- class RenameCollection {
- public:
- void run() {
- NamespaceString source( "unittests.rollback_rename_collection_src" );
- NamespaceString target( "unittests.rollback_rename_collection_dest" );
- OperationContextImpl txn;
-
- dropDatabase( &txn, source );
- dropDatabase( &txn, target );
-
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite globalWriteLock( txn.lockState() );
- Client::Context ctx( &txn, source );
-
- {
- WriteUnitOfWork uow( &txn );
- ASSERT( !collectionExists( &ctx, source ) );
- ASSERT( !collectionExists( &ctx, target ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), source.ns(), BSONObj(), false,
- defaultIndexes ) );
+ if (rollback) {
+ ASSERT(collectionExists(&ctx, ns));
+ } else {
+ ASSERT(!collectionExists(&ctx, ns));
+ }
+ }
+};
+
+template <bool rollback, bool defaultIndexes>
+class RenameCollection {
+public:
+ void run() {
+ NamespaceString source("unittests.rollback_rename_collection_src");
+ NamespaceString target("unittests.rollback_rename_collection_dest");
+ OperationContextImpl txn;
+
+ dropDatabase(&txn, source);
+ dropDatabase(&txn, target);
+
+ ScopedTransaction transaction(&txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn.lockState());
+ Client::Context ctx(&txn, source);
+
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT(!collectionExists(&ctx, source));
+ ASSERT(!collectionExists(&ctx, target));
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), source.ns(), BSONObj(), false, defaultIndexes));
+ uow.commit();
+ }
+ ASSERT(collectionExists(&ctx, source));
+ ASSERT(!collectionExists(&ctx, target));
+
+ // END OF SETUP / START OF TEST
+
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT_OK(renameCollection(&txn, source, target));
+ ASSERT(!collectionExists(&ctx, source));
+ ASSERT(collectionExists(&ctx, target));
+ if (!rollback) {
uow.commit();
}
- ASSERT( collectionExists( &ctx, source ) );
- ASSERT( !collectionExists( &ctx, target ) );
-
- // END OF SETUP / START OF TEST
-
- {
- WriteUnitOfWork uow( &txn );
- ASSERT_OK( renameCollection( &txn, source, target ) );
- ASSERT( !collectionExists( &ctx, source ) );
- ASSERT( collectionExists( &ctx, target ) );
- if ( !rollback ) {
- uow.commit();
- }
- }
- if ( rollback ) {
- ASSERT( collectionExists( &ctx, source ) );
- ASSERT( !collectionExists( &ctx, target ) );
- }
- else {
- ASSERT( !collectionExists( &ctx, source ) );
- ASSERT( collectionExists( &ctx, target ) );
- }
}
- };
-
- template<bool rollback, bool defaultIndexes>
- class RenameDropTargetCollection {
- public:
- void run() {
- NamespaceString source( "unittests.rollback_rename_droptarget_collection_src" );
- NamespaceString target( "unittests.rollback_rename_droptarget_collection_dest" );
- OperationContextImpl txn;
-
- dropDatabase( &txn, source );
- dropDatabase( &txn, target );
-
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite globalWriteLock( txn.lockState() );
- Client::Context ctx( &txn, source );
-
- BSONObj sourceDoc = BSON( "_id" << "source" );
- BSONObj targetDoc = BSON( "_id" << "target" );
-
- {
- WriteUnitOfWork uow( &txn );
- ASSERT( !collectionExists( &ctx, source ) );
- ASSERT( !collectionExists( &ctx, target ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), source.ns(), BSONObj(), false,
- defaultIndexes ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), target.ns(), BSONObj(), false,
- defaultIndexes ) );
-
- insertRecord( &txn, source, sourceDoc );
- insertRecord( &txn, target, targetDoc );
+ if (rollback) {
+ ASSERT(collectionExists(&ctx, source));
+ ASSERT(!collectionExists(&ctx, target));
+ } else {
+ ASSERT(!collectionExists(&ctx, source));
+ ASSERT(collectionExists(&ctx, target));
+ }
+ }
+};
+
+template <bool rollback, bool defaultIndexes>
+class RenameDropTargetCollection {
+public:
+ void run() {
+ NamespaceString source("unittests.rollback_rename_droptarget_collection_src");
+ NamespaceString target("unittests.rollback_rename_droptarget_collection_dest");
+ OperationContextImpl txn;
+
+ dropDatabase(&txn, source);
+ dropDatabase(&txn, target);
+
+ ScopedTransaction transaction(&txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn.lockState());
+ Client::Context ctx(&txn, source);
+
+ BSONObj sourceDoc = BSON("_id"
+ << "source");
+ BSONObj targetDoc = BSON("_id"
+ << "target");
+
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT(!collectionExists(&ctx, source));
+ ASSERT(!collectionExists(&ctx, target));
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), source.ns(), BSONObj(), false, defaultIndexes));
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), target.ns(), BSONObj(), false, defaultIndexes));
+
+ insertRecord(&txn, source, sourceDoc);
+ insertRecord(&txn, target, targetDoc);
+
+ uow.commit();
+ }
+ ASSERT(collectionExists(&ctx, source));
+ ASSERT(collectionExists(&ctx, target));
+ assertOnlyRecord(&txn, source, sourceDoc);
+ assertOnlyRecord(&txn, target, targetDoc);
+
+ // END OF SETUP / START OF TEST
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT_OK(ctx.db()->dropCollection(&txn, target.ns()));
+ ASSERT_OK(renameCollection(&txn, source, target));
+ ASSERT(!collectionExists(&ctx, source));
+ ASSERT(collectionExists(&ctx, target));
+ assertOnlyRecord(&txn, target, sourceDoc);
+ if (!rollback) {
uow.commit();
}
- ASSERT( collectionExists( &ctx, source ) );
- ASSERT( collectionExists( &ctx, target ) );
- assertOnlyRecord( &txn, source, sourceDoc );
- assertOnlyRecord( &txn, target, targetDoc );
-
- // END OF SETUP / START OF TEST
-
- {
- WriteUnitOfWork uow( &txn );
- ASSERT_OK( ctx.db()->dropCollection( &txn, target.ns() ) );
- ASSERT_OK( renameCollection( &txn, source, target ) );
- ASSERT( !collectionExists( &ctx, source ) );
- ASSERT( collectionExists( &ctx, target ) );
- assertOnlyRecord( &txn, target, sourceDoc );
- if ( !rollback ) {
- uow.commit();
- }
- }
- if ( rollback ) {
- ASSERT( collectionExists( &ctx, source ) );
- ASSERT( collectionExists( &ctx, target ) );
- assertOnlyRecord( &txn, source, sourceDoc );
- assertOnlyRecord( &txn, target, targetDoc );
- }
- else {
- ASSERT( !collectionExists( &ctx, source ) );
- ASSERT( collectionExists( &ctx, target ) );
- assertOnlyRecord( &txn, target, sourceDoc );
- }
}
- };
-
- template<bool rollback, bool defaultIndexes>
- class ReplaceCollection {
- public:
- void run() {
- NamespaceString nss( "unittests.rollback_replace_collection" );
- OperationContextImpl txn;
- dropDatabase( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock( txn.lockState(), nss.db(), MODE_X );
- Client::Context ctx( &txn, nss );
-
- BSONObj oldDoc = BSON( "_id" << "old" );
- BSONObj newDoc = BSON( "_id" << "new" );
-
- {
- WriteUnitOfWork uow( &txn );
- ASSERT( !collectionExists( &ctx, nss ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false,
- defaultIndexes ) );
- insertRecord( &txn, nss, oldDoc );
+ if (rollback) {
+ ASSERT(collectionExists(&ctx, source));
+ ASSERT(collectionExists(&ctx, target));
+ assertOnlyRecord(&txn, source, sourceDoc);
+ assertOnlyRecord(&txn, target, targetDoc);
+ } else {
+ ASSERT(!collectionExists(&ctx, source));
+ ASSERT(collectionExists(&ctx, target));
+ assertOnlyRecord(&txn, target, sourceDoc);
+ }
+ }
+};
+
+template <bool rollback, bool defaultIndexes>
+class ReplaceCollection {
+public:
+ void run() {
+ NamespaceString nss("unittests.rollback_replace_collection");
+ OperationContextImpl txn;
+ dropDatabase(&txn, nss);
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
+ Client::Context ctx(&txn, nss);
+
+ BSONObj oldDoc = BSON("_id"
+ << "old");
+ BSONObj newDoc = BSON("_id"
+ << "new");
+
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT(!collectionExists(&ctx, nss));
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), false, defaultIndexes));
+ insertRecord(&txn, nss, oldDoc);
+ uow.commit();
+ }
+ ASSERT(collectionExists(&ctx, nss));
+ assertOnlyRecord(&txn, nss, oldDoc);
+
+ // END OF SETUP / START OF TEST
+
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT_OK(ctx.db()->dropCollection(&txn, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss));
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), false, defaultIndexes));
+ ASSERT(collectionExists(&ctx, nss));
+ insertRecord(&txn, nss, newDoc);
+ assertOnlyRecord(&txn, nss, newDoc);
+ if (!rollback) {
uow.commit();
}
- ASSERT( collectionExists( &ctx, nss ) );
- assertOnlyRecord( &txn, nss, oldDoc );
-
- // END OF SETUP / START OF TEST
-
- {
- WriteUnitOfWork uow( &txn );
- ASSERT_OK( ctx.db()->dropCollection( &txn, nss.ns() ) );
- ASSERT( !collectionExists( &ctx, nss ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false,
- defaultIndexes ) );
- ASSERT( collectionExists( &ctx, nss ) );
- insertRecord( &txn, nss, newDoc );
- assertOnlyRecord( &txn, nss, newDoc );
- if ( !rollback ) {
- uow.commit();
- }
- }
- ASSERT( collectionExists( &ctx, nss ) );
- if ( rollback ) {
- assertOnlyRecord( &txn, nss, oldDoc );
- }
- else {
- assertOnlyRecord( &txn, nss, newDoc );
- }
}
- };
-
- template<bool rollback, bool defaultIndexes>
- class CreateDropCollection {
- public:
- void run() {
- NamespaceString nss( "unittests.rollback_create_drop_collection" );
- OperationContextImpl txn;
- dropDatabase( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock( txn.lockState(), nss.db(), MODE_X );
- Client::Context ctx( &txn, nss );
-
- BSONObj doc = BSON( "_id" << "example string" );
-
- ASSERT( !collectionExists( &ctx, nss ) );
- {
- WriteUnitOfWork uow( &txn );
-
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false,
- defaultIndexes ) );
- ASSERT( collectionExists( &ctx, nss ) );
- insertRecord( &txn, nss, doc );
- assertOnlyRecord( &txn, nss, doc );
-
- ASSERT_OK( ctx.db()->dropCollection( &txn, nss.ns() ) );
- ASSERT( !collectionExists( &ctx, nss ) );
-
- if ( !rollback ) {
- uow.commit();
- }
- }
- ASSERT( !collectionExists( &ctx, nss ) );
+ ASSERT(collectionExists(&ctx, nss));
+ if (rollback) {
+ assertOnlyRecord(&txn, nss, oldDoc);
+ } else {
+ assertOnlyRecord(&txn, nss, newDoc);
}
- };
-
- template<bool rollback, bool defaultIndexes>
- class TruncateCollection {
- public:
- void run() {
- NamespaceString nss( "unittests.rollback_truncate_collection" );
- OperationContextImpl txn;
- dropDatabase( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock( txn.lockState(), nss.db(), MODE_X );
- Client::Context ctx( &txn, nss );
-
- BSONObj doc = BSON( "_id" << "foo" );
-
- ASSERT( !collectionExists( &ctx, nss ) );
- {
- WriteUnitOfWork uow( &txn );
-
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false,
- defaultIndexes ) );
- ASSERT( collectionExists( &ctx, nss ) );
- insertRecord( &txn, nss, doc );
- assertOnlyRecord( &txn, nss, doc );
+ }
+};
+
+template <bool rollback, bool defaultIndexes>
+class CreateDropCollection {
+public:
+ void run() {
+ NamespaceString nss("unittests.rollback_create_drop_collection");
+ OperationContextImpl txn;
+ dropDatabase(&txn, nss);
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
+ Client::Context ctx(&txn, nss);
+
+ BSONObj doc = BSON("_id"
+ << "example string");
+
+ ASSERT(!collectionExists(&ctx, nss));
+ {
+ WriteUnitOfWork uow(&txn);
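+            // Create, populate, and drop the collection within one unit of work; commit or
+            // rollback, the collection must not exist afterwards.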
+
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), false, defaultIndexes));
+ ASSERT(collectionExists(&ctx, nss));
+ insertRecord(&txn, nss, doc);
+ assertOnlyRecord(&txn, nss, doc);
+
+ ASSERT_OK(ctx.db()->dropCollection(&txn, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss));
+
+ if (!rollback) {
uow.commit();
}
- assertOnlyRecord( &txn, nss, doc );
+ }
+ ASSERT(!collectionExists(&ctx, nss));
+ }
+};
- // END OF SETUP / START OF TEST
+template <bool rollback, bool defaultIndexes>
+class TruncateCollection {
+public:
+ void run() {
+ NamespaceString nss("unittests.rollback_truncate_collection");
+ OperationContextImpl txn;
+ dropDatabase(&txn, nss);
- {
- WriteUnitOfWork uow( &txn );
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
+ Client::Context ctx(&txn, nss);
- ASSERT_OK( truncateCollection( &txn, nss ) );
- ASSERT( collectionExists( &ctx, nss ) );
- assertEmpty( &txn, nss );
+ BSONObj doc = BSON("_id"
+ << "foo");
- if ( !rollback ) {
- uow.commit();
- }
- }
- ASSERT( collectionExists( &ctx, nss ) );
- if ( rollback ) {
- assertOnlyRecord( &txn, nss, doc );
- }
- else {
- assertEmpty( &txn, nss );
- }
+ ASSERT(!collectionExists(&ctx, nss));
+ {
+ WriteUnitOfWork uow(&txn);
+
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), false, defaultIndexes));
+ ASSERT(collectionExists(&ctx, nss));
+ insertRecord(&txn, nss, doc);
+ assertOnlyRecord(&txn, nss, doc);
+ uow.commit();
}
- };
-
- template<bool rollback>
- class CreateIndex {
- public:
- void run() {
- string ns = "unittests.rollback_create_index";
- OperationContextImpl txn;
- NamespaceString nss( ns );
- dropDatabase( &txn, nss );
- createCollection( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- AutoGetDb autoDb(&txn, nss.db(), MODE_X);
-
- Collection* coll = autoDb.getDb()->getCollection( ns );
- IndexCatalog* catalog = coll->getIndexCatalog();
+ assertOnlyRecord(&txn, nss, doc);
- string idxName = "a";
- BSONObj spec = BSON( "ns" << ns << "key" << BSON( "a" << 1 ) << "name" << idxName );
+ // END OF SETUP / START OF TEST
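+        // Truncate inside a unit of work: rollback must bring the single document back, while
+        // commit must leave the collection empty (see the asserts below).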
- // END SETUP / START TEST
+ {
+ WriteUnitOfWork uow(&txn);
- {
- WriteUnitOfWork uow( &txn );
- ASSERT_OK( catalog->createIndexOnEmptyCollection( &txn, spec ) );
- insertRecord( &txn, nss, BSON( "a" << 1 ) );
- insertRecord( &txn, nss, BSON( "a" << 2 ) );
- insertRecord( &txn, nss, BSON( "a" << 3 ) );
- if ( !rollback ) {
- uow.commit();
- }
- }
+ ASSERT_OK(truncateCollection(&txn, nss));
+ ASSERT(collectionExists(&ctx, nss));
+ assertEmpty(&txn, nss);
- if ( rollback ) {
- ASSERT( !indexExists( &txn, nss, idxName ) );
- }
- else {
- ASSERT( indexReady( &txn, nss, idxName ) );
+ if (!rollback) {
+ uow.commit();
}
}
- };
-
- template<bool rollback>
- class DropIndex {
- public:
- void run() {
- string ns = "unittests.rollback_drop_index";
- OperationContextImpl txn;
- NamespaceString nss( ns );
- dropDatabase( &txn, nss );
- createCollection( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- AutoGetDb autoDb(&txn, nss.db(), MODE_X);
-
- Collection* coll = autoDb.getDb()->getCollection(ns);
- IndexCatalog* catalog = coll->getIndexCatalog();
+ ASSERT(collectionExists(&ctx, nss));
+ if (rollback) {
+ assertOnlyRecord(&txn, nss, doc);
+ } else {
+ assertEmpty(&txn, nss);
+ }
+ }
+};
+
+template <bool rollback>
+class CreateIndex {
+public:
+ void run() {
+ string ns = "unittests.rollback_create_index";
+ OperationContextImpl txn;
+ NamespaceString nss(ns);
+ dropDatabase(&txn, nss);
+ createCollection(&txn, nss);
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+
+ Collection* coll = autoDb.getDb()->getCollection(ns);
+ IndexCatalog* catalog = coll->getIndexCatalog();
+
+ string idxName = "a";
+ BSONObj spec = BSON("ns" << ns << "key" << BSON("a" << 1) << "name" << idxName);
- string idxName = "a";
- BSONObj spec = BSON( "ns" << ns << "key" << BSON( "a" << 1 ) << "name" << idxName );
+ // END SETUP / START TEST
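+        // Build the index and insert records in one unit of work; without a commit the index
+        // must disappear on rollback, with a commit it must be left in the ready state.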
- {
- WriteUnitOfWork uow( &txn );
- ASSERT_OK( catalog->createIndexOnEmptyCollection( &txn, spec ) );
- insertRecord( &txn, nss, BSON( "a" << 1 ) );
- insertRecord( &txn, nss, BSON( "a" << 2 ) );
- insertRecord( &txn, nss, BSON( "a" << 3 ) );
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
+ insertRecord(&txn, nss, BSON("a" << 1));
+ insertRecord(&txn, nss, BSON("a" << 2));
+ insertRecord(&txn, nss, BSON("a" << 3));
+ if (!rollback) {
uow.commit();
}
- ASSERT( indexReady( &txn, nss, idxName ) );
- ASSERT_EQ( 3u, getNumIndexEntries( &txn, nss, idxName ) );
+ }
- // END SETUP / START TEST
+ if (rollback) {
+ ASSERT(!indexExists(&txn, nss, idxName));
+ } else {
+ ASSERT(indexReady(&txn, nss, idxName));
+ }
+ }
+};
+
+template <bool rollback>
+class DropIndex {
+public:
+ void run() {
+ string ns = "unittests.rollback_drop_index";
+ OperationContextImpl txn;
+ NamespaceString nss(ns);
+ dropDatabase(&txn, nss);
+ createCollection(&txn, nss);
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+
+ Collection* coll = autoDb.getDb()->getCollection(ns);
+ IndexCatalog* catalog = coll->getIndexCatalog();
- {
- WriteUnitOfWork uow( &txn );
+ string idxName = "a";
+ BSONObj spec = BSON("ns" << ns << "key" << BSON("a" << 1) << "name" << idxName);
+
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
+ insertRecord(&txn, nss, BSON("a" << 1));
+ insertRecord(&txn, nss, BSON("a" << 2));
+ insertRecord(&txn, nss, BSON("a" << 3));
+ uow.commit();
+ }
+ ASSERT(indexReady(&txn, nss, idxName));
+ ASSERT_EQ(3u, getNumIndexEntries(&txn, nss, idxName));
- dropIndex( &txn, nss, idxName );
- ASSERT( !indexExists( &txn, nss, idxName ) );
+ // END SETUP / START TEST
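+        // Drop the index without committing; rollback must restore it fully, including all
+        // three index entries (checked below).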
- if ( !rollback ) {
- uow.commit();
- }
- }
- if ( rollback ) {
- ASSERT( indexExists( &txn, nss, idxName ) );
- ASSERT( indexReady( &txn, nss, idxName ) );
- ASSERT_EQ( 3u, getNumIndexEntries( &txn, nss, idxName ) );
- }
- else {
- ASSERT( !indexExists( &txn, nss, idxName ) );
+ {
+ WriteUnitOfWork uow(&txn);
+
+ dropIndex(&txn, nss, idxName);
+ ASSERT(!indexExists(&txn, nss, idxName));
+
+ if (!rollback) {
+ uow.commit();
}
}
- };
-
- template<bool rollback>
- class CreateDropIndex {
- public:
- void run() {
- string ns = "unittests.rollback_create_drop_index";
- OperationContextImpl txn;
- NamespaceString nss( ns );
- dropDatabase( &txn, nss );
- createCollection( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- AutoGetDb autoDb(&txn, nss.db(), MODE_X);
-
- Collection* coll = autoDb.getDb()->getCollection(ns);
- IndexCatalog* catalog = coll->getIndexCatalog();
+ if (rollback) {
+ ASSERT(indexExists(&txn, nss, idxName));
+ ASSERT(indexReady(&txn, nss, idxName));
+ ASSERT_EQ(3u, getNumIndexEntries(&txn, nss, idxName));
+ } else {
+ ASSERT(!indexExists(&txn, nss, idxName));
+ }
+ }
+};
+
+template <bool rollback>
+class CreateDropIndex {
+public:
+ void run() {
+ string ns = "unittests.rollback_create_drop_index";
+ OperationContextImpl txn;
+ NamespaceString nss(ns);
+ dropDatabase(&txn, nss);
+ createCollection(&txn, nss);
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+
+ Collection* coll = autoDb.getDb()->getCollection(ns);
+ IndexCatalog* catalog = coll->getIndexCatalog();
- string idxName = "a";
- BSONObj spec = BSON( "ns" << ns << "key" << BSON( "a" << 1 ) << "name" << idxName );
+ string idxName = "a";
+ BSONObj spec = BSON("ns" << ns << "key" << BSON("a" << 1) << "name" << idxName);
- // END SETUP / START TEST
+ // END SETUP / START TEST
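+        // Create and drop the index within the same unit of work; the index must be gone
+        // afterwards whether or not the unit of work commits.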
- {
- WriteUnitOfWork uow( &txn );
+ {
+ WriteUnitOfWork uow(&txn);
- ASSERT_OK( catalog->createIndexOnEmptyCollection( &txn, spec ) );
- insertRecord( &txn, nss, BSON( "a" << 1 ) );
- insertRecord( &txn, nss, BSON( "a" << 2 ) );
- insertRecord( &txn, nss, BSON( "a" << 3 ) );
- ASSERT( indexExists( &txn, nss, idxName ) );
- ASSERT_EQ( 3u, getNumIndexEntries( &txn, nss, idxName ) );
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
+ insertRecord(&txn, nss, BSON("a" << 1));
+ insertRecord(&txn, nss, BSON("a" << 2));
+ insertRecord(&txn, nss, BSON("a" << 3));
+ ASSERT(indexExists(&txn, nss, idxName));
+ ASSERT_EQ(3u, getNumIndexEntries(&txn, nss, idxName));
- dropIndex( &txn, nss, idxName );
- ASSERT( !indexExists( &txn, nss, idxName ) );
+ dropIndex(&txn, nss, idxName);
+ ASSERT(!indexExists(&txn, nss, idxName));
- if ( !rollback ) {
- uow.commit();
- }
+ if (!rollback) {
+ uow.commit();
}
-
- ASSERT( !indexExists( &txn, nss, idxName ) );
}
- };
-
- template<bool rollback>
- class SetIndexHead {
- public:
- void run() {
- string ns = "unittests.rollback_set_index_head";
- OperationContextImpl txn;
- NamespaceString nss( ns );
- dropDatabase( &txn, nss );
- createCollection( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- AutoGetDb autoDb(&txn, nss.db(), MODE_X);
-
- Collection* coll = autoDb.getDb()->getCollection(ns);
- IndexCatalog* catalog = coll->getIndexCatalog();
- string idxName = "a";
- BSONObj spec = BSON( "ns" << ns << "key" << BSON( "a" << 1 ) << "name" << idxName );
+ ASSERT(!indexExists(&txn, nss, idxName));
+ }
+};
+
+template <bool rollback>
+class SetIndexHead {
+public:
+ void run() {
+ string ns = "unittests.rollback_set_index_head";
+ OperationContextImpl txn;
+ NamespaceString nss(ns);
+ dropDatabase(&txn, nss);
+ createCollection(&txn, nss);
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+
+ Collection* coll = autoDb.getDb()->getCollection(ns);
+ IndexCatalog* catalog = coll->getIndexCatalog();
- {
- WriteUnitOfWork uow( &txn );
- ASSERT_OK( catalog->createIndexOnEmptyCollection( &txn, spec ) );
- uow.commit();
- }
+ string idxName = "a";
+ BSONObj spec = BSON("ns" << ns << "key" << BSON("a" << 1) << "name" << idxName);
- IndexDescriptor* indexDesc = catalog->findIndexByName(&txn, idxName);
- invariant(indexDesc);
- const IndexCatalogEntry* ice = catalog->getEntry(indexDesc);
- invariant(ice);
- HeadManager* headManager = ice->headManager();
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
+ uow.commit();
+ }
- const RecordId oldHead = headManager->getHead(&txn);
- ASSERT_EQ(oldHead, ice->head(&txn));
+ IndexDescriptor* indexDesc = catalog->findIndexByName(&txn, idxName);
+ invariant(indexDesc);
+ const IndexCatalogEntry* ice = catalog->getEntry(indexDesc);
+ invariant(ice);
+ HeadManager* headManager = ice->headManager();
- const RecordId dummyHead(123, 456);
- ASSERT_NE(oldHead, dummyHead);
+ const RecordId oldHead = headManager->getHead(&txn);
+ ASSERT_EQ(oldHead, ice->head(&txn));
- // END SETUP / START TEST
+ const RecordId dummyHead(123, 456);
+ ASSERT_NE(oldHead, dummyHead);
- {
- WriteUnitOfWork uow( &txn );
+ // END SETUP / START TEST
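+        // Point the index head at a dummy RecordId inside a unit of work; rollback must restore
+        // oldHead, while commit must keep dummyHead (verified through both the catalog entry
+        // and the HeadManager below).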
- headManager->setHead(&txn, dummyHead);
+ {
+ WriteUnitOfWork uow(&txn);
- ASSERT_EQ(ice->head(&txn), dummyHead);
- ASSERT_EQ(headManager->getHead(&txn), dummyHead);
+ headManager->setHead(&txn, dummyHead);
- if ( !rollback ) {
- uow.commit();
- }
- }
+ ASSERT_EQ(ice->head(&txn), dummyHead);
+ ASSERT_EQ(headManager->getHead(&txn), dummyHead);
- if ( rollback ) {
- ASSERT_EQ(ice->head(&txn), oldHead);
- ASSERT_EQ(headManager->getHead(&txn), oldHead);
- }
- else {
- ASSERT_EQ(ice->head(&txn), dummyHead);
- ASSERT_EQ(headManager->getHead(&txn), dummyHead);
+ if (!rollback) {
+ uow.commit();
}
}
- };
-
- template<bool rollback>
- class CreateCollectionAndIndexes {
- public:
- void run() {
- string ns = "unittests.rollback_create_collection_and_indexes";
- OperationContextImpl txn;
- NamespaceString nss( ns );
- dropDatabase( &txn, nss );
-
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock( txn.lockState(), nss.db(), MODE_X );
- Client::Context ctx( &txn, nss.ns() );
-
- string idxNameA = "indexA";
- string idxNameB = "indexB";
- string idxNameC = "indexC";
- BSONObj specA = BSON( "ns" << ns << "key" << BSON( "a" << 1 ) << "name" << idxNameA );
- BSONObj specB = BSON( "ns" << ns << "key" << BSON( "b" << 1 ) << "name" << idxNameB );
- BSONObj specC = BSON( "ns" << ns << "key" << BSON( "c" << 1 ) << "name" << idxNameC );
-
- // END SETUP / START TEST
-
- {
- WriteUnitOfWork uow( &txn );
- ASSERT( !collectionExists( &ctx, nss.ns() ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false, false ) );
- ASSERT( collectionExists( &ctx, nss.ns() ) );
- Collection* coll = ctx.db()->getCollection( ns );
- IndexCatalog* catalog = coll->getIndexCatalog();
-
- ASSERT_OK( catalog->createIndexOnEmptyCollection( &txn, specA ) );
- ASSERT_OK( catalog->createIndexOnEmptyCollection( &txn, specB ) );
- ASSERT_OK( catalog->createIndexOnEmptyCollection( &txn, specC ) );
-
- if ( !rollback ) {
- uow.commit();
- }
- } // uow
- if ( rollback ) {
- ASSERT( !collectionExists( &ctx, ns ) );
- }
- else {
- ASSERT( collectionExists( &ctx, ns ) );
- ASSERT( indexReady( &txn, nss, idxNameA ) );
- ASSERT( indexReady( &txn, nss, idxNameB ) );
- ASSERT( indexReady( &txn, nss, idxNameC ) );
- }
+
+ if (rollback) {
+ ASSERT_EQ(ice->head(&txn), oldHead);
+ ASSERT_EQ(headManager->getHead(&txn), oldHead);
+ } else {
+ ASSERT_EQ(ice->head(&txn), dummyHead);
+ ASSERT_EQ(headManager->getHead(&txn), dummyHead);
}
- };
+ }
+};
+
+template <bool rollback>
+class CreateCollectionAndIndexes {
+public:
+ void run() {
+ string ns = "unittests.rollback_create_collection_and_indexes";
+ OperationContextImpl txn;
+ NamespaceString nss(ns);
+ dropDatabase(&txn, nss);
+
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
+ Client::Context ctx(&txn, nss.ns());
+
+ string idxNameA = "indexA";
+ string idxNameB = "indexB";
+ string idxNameC = "indexC";
+ BSONObj specA = BSON("ns" << ns << "key" << BSON("a" << 1) << "name" << idxNameA);
+ BSONObj specB = BSON("ns" << ns << "key" << BSON("b" << 1) << "name" << idxNameB);
+ BSONObj specC = BSON("ns" << ns << "key" << BSON("c" << 1) << "name" << idxNameC);
+
+ // END SETUP / START TEST
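+        // Create the collection and three indexes in one unit of work; rollback must remove
+        // the collection entirely, while commit must leave all three indexes ready.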
+ {
+ WriteUnitOfWork uow(&txn);
+ ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), false, false));
+ ASSERT(collectionExists(&ctx, nss.ns()));
+ Collection* coll = ctx.db()->getCollection(ns);
+ IndexCatalog* catalog = coll->getIndexCatalog();
- class All : public Suite {
- public:
- All() : Suite( "rollback" ) {
- }
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, specA));
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, specB));
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, specC));
- template< template<bool> class T >
- void addAll() {
- add< T<false> >();
- add< T<true> >();
+ if (!rollback) {
+ uow.commit();
+ }
+ } // uow
+ if (rollback) {
+ ASSERT(!collectionExists(&ctx, ns));
+ } else {
+ ASSERT(collectionExists(&ctx, ns));
+ ASSERT(indexReady(&txn, nss, idxNameA));
+ ASSERT(indexReady(&txn, nss, idxNameB));
+ ASSERT(indexReady(&txn, nss, idxNameC));
}
+ }
+};
- template< template<bool, bool> class T >
- void addAll() {
- add< T<false, false> >();
- add< T<true, false> >();
- add< T<false, true> >();
- add< T<true, true> >();
- }
- void setupTests() {
- addAll< CreateCollection >();
- addAll< RenameCollection >();
- addAll< DropCollection >();
- addAll< RenameDropTargetCollection >();
- addAll< ReplaceCollection >();
- addAll< CreateDropCollection >();
- addAll< TruncateCollection >();
- addAll< CreateIndex >();
- addAll< DropIndex >();
- addAll< CreateDropIndex >();
- addAll< SetIndexHead >();
- addAll< CreateCollectionAndIndexes >();
- }
- };
+class All : public Suite {
+public:
+ All() : Suite("rollback") {}
- SuiteInstance<All> all;
+ template <template <bool> class T>
+ void addAll() {
+ add<T<false>>();
+ add<T<true>>();
+ }
+
+ template <template <bool, bool> class T>
+ void addAll() {
+ add<T<false, false>>();
+ add<T<true, false>>();
+ add<T<false, true>>();
+ add<T<true, true>>();
+ }
+
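+    // Each test runs once per template instantiation: twice for the <bool rollback> tests and
+    // four times for the <bool rollback, bool defaultIndexes> tests, e.g.
+    // addAll<ReplaceCollection>() registers ReplaceCollection<false, false> through
+    // ReplaceCollection<true, true>.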
+ void setupTests() {
+ addAll<CreateCollection>();
+ addAll<RenameCollection>();
+ addAll<DropCollection>();
+ addAll<RenameDropTargetCollection>();
+ addAll<ReplaceCollection>();
+ addAll<CreateDropCollection>();
+ addAll<TruncateCollection>();
+ addAll<CreateIndex>();
+ addAll<DropIndex>();
+ addAll<CreateDropIndex>();
+ addAll<SetIndexHead>();
+ addAll<CreateCollectionAndIndexes>();
+ }
+};
-} // namespace RollbackTests
+SuiteInstance<All> all;
+} // namespace RollbackTests
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
index 552999c7882..e88e3b96144 100644
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
@@ -47,645 +47,641 @@
namespace ShardingTests {
- using boost::shared_ptr;
- using std::auto_ptr;
- using std::make_pair;
- using std::map;
- using std::pair;
- using std::set;
- using std::string;
- using std::vector;
-
- namespace serverandquerytests {
- class test1 {
- public:
- void run() {
- ServerAndQuery a( "foo:1" , BSON( "a" << GT << 0 << LTE << 100 ) );
- ServerAndQuery b( "foo:1" , BSON( "a" << GT << 200 << LTE << 1000 ) );
-
- ASSERT( a < b );
- ASSERT( ! ( b < a ) );
-
- set<ServerAndQuery> s;
- s.insert( a );
- s.insert( b );
-
- ASSERT_EQUALS( (unsigned int)2 , s.size() );
- }
- };
+using boost::shared_ptr;
+using std::auto_ptr;
+using std::make_pair;
+using std::map;
+using std::pair;
+using std::set;
+using std::string;
+using std::vector;
+
+namespace serverandquerytests {
+class test1 {
+public:
+ void run() {
+ ServerAndQuery a("foo:1", BSON("a" << GT << 0 << LTE << 100));
+ ServerAndQuery b("foo:1", BSON("a" << GT << 200 << LTE << 1000));
+
+ ASSERT(a < b);
+ ASSERT(!(b < a));
+
+ set<ServerAndQuery> s;
+ s.insert(a);
+ s.insert(b);
+
+ ASSERT_EQUALS((unsigned int)2, s.size());
}
+};
+}  // namespace serverandquerytests
- static int rand( int max = -1 ){
- static unsigned seed = 1337;
+static int rand(int max = -1) {
+ static unsigned seed = 1337;
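+    // The fixed seed keeps generated chunk layouts reproducible from run to run (except on
+    // Windows, where ::rand() is used and the seed is ignored).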
#if !defined(_WIN32)
- int r = rand_r( &seed ) ;
+ int r = rand_r(&seed);
#else
- int r = ::rand(); // seed not used in this case
+ int r = ::rand(); // seed not used in this case
#endif
- // Modding is bad, but don't really care in this case
- return max > 0 ? r % max : r;
- }
-
- //
- // Sets up a basic environment for loading chunks to/from the direct database connection
- // Redirects connections to the direct database for the duration of the test.
- //
- class ChunkManagerTest : public ConnectionString::ConnectionHook {
- public:
-
- ChunkManagerTest() : _client(&_txn) {
- shardConnectionPool.clear();
-
- DBException::traceExceptions = true;
-
- // Make all connections redirect to the direct client
- ConnectionString::setConnectionHook( this );
-
- // Create the default config database before querying, necessary for direct connections
- _client.dropDatabase( "config" );
- _client.insert( "config.test", BSON( "hello" << "world" ) );
- _client.dropCollection( "config.test" );
-
- _client.dropDatabase( nsGetDB( collName() ) );
- _client.insert( collName(), BSON( "hello" << "world" ) );
- _client.dropCollection( collName() );
-
- // Since we've redirected the conns, the host doesn't matter here so long as it's
- // prefixed with a "$"
- _shard = Shard("shard0000",
- "$hostFooBar:27017",
- 0 /* maxSize */,
- false /* draining */);
- // Need to run this to ensure the shard is in the global lookup table
- Shard::installShard(_shard.getName(), _shard);
-
- // Create an index so that diffing works correctly, otherwise no cursors from S&O
- ASSERT_OK(dbtests::createIndex(
- &_txn,
- ChunkType::ConfigNS,
- BSON( ChunkType::ns() << 1 << // br
- ChunkType::DEPRECATED_lastmod() << 1 ) ));
- configServer.init("$dummy:1000");
- }
-
- virtual ~ChunkManagerTest() {
- // Reset the redirection
- ConnectionString::setConnectionHook( NULL );
- }
-
- string collName(){ return "foo.bar"; }
-
- Shard& shard(){ return _shard; }
-
- virtual DBClientBase* connect( const ConnectionString& connStr,
- string& errmsg,
- double socketTimeout )
- {
- // Note - must be new, since it gets owned elsewhere
- return new CustomDirectClient(&_txn);
- }
-
-
- protected:
- OperationContextImpl _txn;
- CustomDirectClient _client;
- Shard _shard;
- };
+    // Modding biases the distribution, but we don't really care in this case
+ return max > 0 ? r % max : r;
+}
- //
- // Tests creating a new chunk manager and creating the default chunks
- //
- class ChunkManagerCreateBasicTest : public ChunkManagerTest {
- public:
+//
+// Sets up a basic environment for loading chunks to/from the direct database connection
+// Redirects connections to the direct database for the duration of the test.
+//
+class ChunkManagerTest : public ConnectionString::ConnectionHook {
+public:
+ ChunkManagerTest() : _client(&_txn) {
+ shardConnectionPool.clear();
+
+ DBException::traceExceptions = true;
+
+ // Make all connections redirect to the direct client
+ ConnectionString::setConnectionHook(this);
+
+ // Create the default config database before querying, necessary for direct connections
+ _client.dropDatabase("config");
+ _client.insert("config.test",
+ BSON("hello"
+ << "world"));
+ _client.dropCollection("config.test");
+
+ _client.dropDatabase(nsGetDB(collName()));
+ _client.insert(collName(),
+ BSON("hello"
+ << "world"));
+ _client.dropCollection(collName());
+
+ // Since we've redirected the conns, the host doesn't matter here so long as it's
+ // prefixed with a "$"
+ _shard = Shard("shard0000", "$hostFooBar:27017", 0 /* maxSize */, false /* draining */);
+ // Need to run this to ensure the shard is in the global lookup table
+ Shard::installShard(_shard.getName(), _shard);
+
+ // Create an index so that diffing works correctly, otherwise no cursors from S&O
+ ASSERT_OK(dbtests::createIndex(&_txn,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << 1 << // br
+ ChunkType::DEPRECATED_lastmod() << 1)));
+ configServer.init("$dummy:1000");
+ }
- void run(){
+ virtual ~ChunkManagerTest() {
+ // Reset the redirection
+ ConnectionString::setConnectionHook(NULL);
+ }
- ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkManager manager(collName(), shardKeyPattern, false);
- manager.createFirstChunks( shard().getConnString(), shard(), NULL, NULL );
+ string collName() {
+ return "foo.bar";
+ }
- BSONObj firstChunk = _client.findOne(ChunkType::ConfigNS, BSONObj()).getOwned();
+ Shard& shard() {
+ return _shard;
+ }
- ASSERT(firstChunk[ChunkType::min()].Obj()[ "_id" ].type() == MinKey );
- ASSERT(firstChunk[ChunkType::max()].Obj()[ "_id" ].type() == MaxKey );
+ virtual DBClientBase* connect(const ConnectionString& connStr,
+ string& errmsg,
+ double socketTimeout) {
+ // Note - must be new, since it gets owned elsewhere
+ return new CustomDirectClient(&_txn);
+ }
- ChunkVersion version = ChunkVersion::fromBSON(firstChunk,
- ChunkType::DEPRECATED_lastmod());
- ASSERT( version.majorVersion() == 1 );
- ASSERT( version.minorVersion() == 0 );
- ASSERT( version.isEpochSet() );
+protected:
+ OperationContextImpl _txn;
+ CustomDirectClient _client;
+ Shard _shard;
+};
- }
+//
+// Tests creating a new chunk manager and creating the default chunks
+//
+class ChunkManagerCreateBasicTest : public ChunkManagerTest {
+public:
+ void run() {
+ ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+ ChunkManager manager(collName(), shardKeyPattern, false);
+ manager.createFirstChunks(shard().getConnString(), shard(), NULL, NULL);
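+        // With no split keys supplied, the single initial chunk should span MinKey..MaxKey at
+        // version 1|0, as the asserts below verify.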
- };
+ BSONObj firstChunk = _client.findOne(ChunkType::ConfigNS, BSONObj()).getOwned();
- //
- // Tests creating a new chunk manager with random split points. Creating chunks on multiple shards is not
- // tested here since there are unresolved race conditions there and probably should be avoided if at all
- // possible.
- //
- class ChunkManagerCreateFullTest : public ChunkManagerTest {
- public:
+ ASSERT(firstChunk[ChunkType::min()].Obj()["_id"].type() == MinKey);
+ ASSERT(firstChunk[ChunkType::max()].Obj()["_id"].type() == MaxKey);
- static const int numSplitPoints = 100;
+ ChunkVersion version = ChunkVersion::fromBSON(firstChunk, ChunkType::DEPRECATED_lastmod());
- void genRandomSplitPoints( vector<int>* splitPoints ){
- for( int i = 0; i < numSplitPoints; i++ ){
- splitPoints->push_back( rand( numSplitPoints * 10 ) );
- }
+ ASSERT(version.majorVersion() == 1);
+ ASSERT(version.minorVersion() == 0);
+ ASSERT(version.isEpochSet());
+ }
+};
+
+//
+// Tests creating a new chunk manager with random split points. Creating chunks on multiple
+// shards is not tested here, since that path has unresolved race conditions and should be
+// avoided if at all possible.
+//
+class ChunkManagerCreateFullTest : public ChunkManagerTest {
+public:
+ static const int numSplitPoints = 100;
+
+ void genRandomSplitPoints(vector<int>* splitPoints) {
+ for (int i = 0; i < numSplitPoints; i++) {
+ splitPoints->push_back(rand(numSplitPoints * 10));
}
+ }
- void genRandomSplitKeys( const string& keyName, vector<BSONObj>* splitKeys ){
- vector<int> splitPoints;
- genRandomSplitPoints( &splitPoints );
+ void genRandomSplitKeys(const string& keyName, vector<BSONObj>* splitKeys) {
+ vector<int> splitPoints;
+ genRandomSplitPoints(&splitPoints);
- for( vector<int>::iterator it = splitPoints.begin(); it != splitPoints.end(); ++it ){
- splitKeys->push_back( BSON( keyName << *it ) );
- }
+ for (vector<int>::iterator it = splitPoints.begin(); it != splitPoints.end(); ++it) {
+ splitKeys->push_back(BSON(keyName << *it));
}
+ }
- // Uses a chunk manager to create chunks
- void createChunks( const string& keyName ){
-
- vector<BSONObj> splitKeys;
- genRandomSplitKeys( keyName, &splitKeys );
-
- ShardKeyPattern shardKeyPattern(BSON(keyName << 1));
- ChunkManager manager(collName(), shardKeyPattern, false);
-
- manager.createFirstChunks( shard().getConnString(), shard(), &splitKeys, NULL );
- }
+ // Uses a chunk manager to create chunks
+ void createChunks(const string& keyName) {
+ vector<BSONObj> splitKeys;
+ genRandomSplitKeys(keyName, &splitKeys);
- void run(){
+ ShardKeyPattern shardKeyPattern(BSON(keyName << 1));
+ ChunkManager manager(collName(), shardKeyPattern, false);
- string keyName = "_id";
- createChunks( keyName );
+ manager.createFirstChunks(shard().getConnString(), shard(), &splitKeys, NULL);
+ }
- auto_ptr<DBClientCursor> cursor =
- _client.query(ChunkType::ConfigNS, QUERY(ChunkType::ns(collName())));
+ void run() {
+ string keyName = "_id";
+ createChunks(keyName);
- set<int> minorVersions;
- OID epoch;
+ auto_ptr<DBClientCursor> cursor =
+ _client.query(ChunkType::ConfigNS, QUERY(ChunkType::ns(collName())));
- // Check that all chunks were created with version 1|x with consistent epoch and unique minor versions
- while( cursor->more() ){
+ set<int> minorVersions;
+ OID epoch;
- BSONObj chunk = cursor->next();
+        // Check that all chunks were created with version 1|x, with a consistent epoch and
+        // unique minor versions
+ while (cursor->more()) {
+ BSONObj chunk = cursor->next();
- ChunkVersion version = ChunkVersion::fromBSON(chunk,
- ChunkType::DEPRECATED_lastmod());
+ ChunkVersion version = ChunkVersion::fromBSON(chunk, ChunkType::DEPRECATED_lastmod());
- ASSERT( version.majorVersion() == 1 );
- ASSERT( version.isEpochSet() );
+ ASSERT(version.majorVersion() == 1);
+ ASSERT(version.isEpochSet());
- if( ! epoch.isSet() ) epoch = version.epoch();
- ASSERT( version.epoch() == epoch );
+ if (!epoch.isSet())
+ epoch = version.epoch();
+ ASSERT(version.epoch() == epoch);
- ASSERT( minorVersions.find( version.minorVersion() ) == minorVersions.end() );
- minorVersions.insert( version.minorVersion() );
+ ASSERT(minorVersions.find(version.minorVersion()) == minorVersions.end());
+ minorVersions.insert(version.minorVersion());
- ASSERT(chunk[ChunkType::shard()].String() == shard().getName());
- }
+ ASSERT(chunk[ChunkType::shard()].String() == shard().getName());
}
+ }
+};
+
+//
+// Tests that chunks are loaded correctly from the db with no a priori info, and that they can
+// be reloaded on top of an old chunk manager with changes.
+//
+class ChunkManagerLoadBasicTest : public ChunkManagerCreateFullTest {
+public:
+ void run() {
+ string keyName = "_id";
+ createChunks(keyName);
+ int numChunks =
+ static_cast<int>(_client.count(ChunkType::ConfigNS, BSON(ChunkType::ns(collName()))));
+
+ BSONObj firstChunk = _client.findOne(ChunkType::ConfigNS, BSONObj()).getOwned();
+
+ ChunkVersion version = ChunkVersion::fromBSON(firstChunk, ChunkType::DEPRECATED_lastmod());
+
+ // Make manager load existing chunks
+ BSONObjBuilder collDocBuilder;
+ collDocBuilder << CollectionType::ns(collName());
+ collDocBuilder << CollectionType::keyPattern(BSON("_id" << 1));
+ collDocBuilder << CollectionType::unique(false);
+ collDocBuilder << CollectionType::dropped(false);
+ collDocBuilder << CollectionType::DEPRECATED_lastmod(jsTime());
+ collDocBuilder << CollectionType::DEPRECATED_lastmodEpoch(version.epoch());
+
+ BSONObj collDoc(collDocBuilder.done());
+
+ ChunkManager manager(collDoc);
+ manager.loadExistingRanges(shard().getConnString(), NULL);
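+        // Loading from scratch: the manager should pick up the creation epoch, and its minor
+        // version should correspond to the last of the initial chunks.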
+
+ ASSERT(manager.getVersion().epoch() == version.epoch());
+ ASSERT(manager.getVersion().minorVersion() == (numChunks - 1));
+ ASSERT(static_cast<int>(manager.getChunkMap().size()) == numChunks);
+
+ // Modify chunks collection
+ BSONObjBuilder b;
+ ChunkVersion laterVersion = ChunkVersion(2, 1, version.epoch());
+ laterVersion.addToBSON(b, ChunkType::DEPRECATED_lastmod());
+
+ _client.update(ChunkType::ConfigNS, BSONObj(), BSON("$set" << b.obj()));
+
+ // Make new manager load chunk diff
+ ChunkManager newManager(manager.getns(), manager.getShardKeyPattern(), manager.isUnique());
+ newManager.loadExistingRanges(shard().getConnString(), &manager);
+
+ ASSERT(newManager.getVersion().toLong() == laterVersion.toLong());
+ ASSERT(newManager.getVersion().epoch() == laterVersion.epoch());
+ ASSERT(static_cast<int>(newManager.getChunkMap().size()) == numChunks);
+ }
+};
- };
-
- //
- // Tests that chunks are loaded correctly from the db with no a-priori info and also that they can be reloaded
- // on top of an old chunk manager with changes.
- //
- class ChunkManagerLoadBasicTest : public ChunkManagerCreateFullTest {
- public:
-
- void run(){
-
- string keyName = "_id";
- createChunks( keyName );
- int numChunks = static_cast<int>(_client.count(ChunkType::ConfigNS,
- BSON(ChunkType::ns(collName()))));
-
- BSONObj firstChunk = _client.findOne(ChunkType::ConfigNS, BSONObj()).getOwned();
-
- ChunkVersion version = ChunkVersion::fromBSON(firstChunk,
- ChunkType::DEPRECATED_lastmod());
-
- // Make manager load existing chunks
- BSONObjBuilder collDocBuilder;
- collDocBuilder << CollectionType::ns(collName());
- collDocBuilder << CollectionType::keyPattern(BSON( "_id" << 1 ));
- collDocBuilder << CollectionType::unique(false);
- collDocBuilder << CollectionType::dropped(false);
- collDocBuilder << CollectionType::DEPRECATED_lastmod(jsTime());
- collDocBuilder << CollectionType::DEPRECATED_lastmodEpoch(version.epoch());
-
- BSONObj collDoc(collDocBuilder.done());
-
- ChunkManager manager(collDoc);
- manager.loadExistingRanges(shard().getConnString(), NULL);
+class ChunkDiffUnitTest {
+public:
+ bool _inverse;
- ASSERT(manager.getVersion().epoch() == version.epoch());
- ASSERT(manager.getVersion().minorVersion() == (numChunks - 1));
- ASSERT(static_cast<int>(manager.getChunkMap().size()) == numChunks);
+ typedef map<BSONObj, BSONObj, BSONObjCmp> RangeMap;
+ typedef map<string, ChunkVersion> VersionMap;
- // Modify chunks collection
- BSONObjBuilder b;
- ChunkVersion laterVersion = ChunkVersion( 2, 1, version.epoch() );
- laterVersion.addToBSON(b, ChunkType::DEPRECATED_lastmod());
+ ChunkDiffUnitTest(bool inverse) : _inverse(inverse) {}
- _client.update(ChunkType::ConfigNS, BSONObj(), BSON( "$set" << b.obj()));
+ // The default pass-through adapter for using config diffs
+ class DefaultDiffAdapter : public ConfigDiffTracker<BSONObj, string> {
+ public:
+ DefaultDiffAdapter() {}
+ virtual ~DefaultDiffAdapter() {}
- // Make new manager load chunk diff
- ChunkManager newManager(manager.getns(),
- manager.getShardKeyPattern(),
- manager.isUnique());
- newManager.loadExistingRanges(shard().getConnString(), &manager);
+ virtual bool isTracked(const BSONObj& chunkDoc) const {
+ return true;
+ }
+ virtual BSONObj maxFrom(const BSONObj& max) const {
+ return max;
+ }
- ASSERT( newManager.getVersion().toLong() == laterVersion.toLong() );
- ASSERT( newManager.getVersion().epoch() == laterVersion.epoch() );
- ASSERT( static_cast<int>( newManager.getChunkMap().size() ) == numChunks );
+ virtual pair<BSONObj, BSONObj> rangeFor(const BSONObj& chunkDoc,
+ const BSONObj& min,
+ const BSONObj& max) const {
+ return make_pair(min, max);
}
+ virtual string shardFor(const string& name) const {
+ return name;
+ }
};
- class ChunkDiffUnitTest {
+ // Inverts the storage order for chunks from min to max
+ class InverseDiffAdapter : public DefaultDiffAdapter {
public:
+ InverseDiffAdapter() {}
+ virtual ~InverseDiffAdapter() {}
- bool _inverse;
-
- typedef map<BSONObj, BSONObj, BSONObjCmp> RangeMap;
- typedef map<string, ChunkVersion> VersionMap;
+ // Disable
+ virtual BSONObj maxFrom(const BSONObj& max) const {
+ ASSERT(false);
+ return max;
+ }
+ virtual BSONObj minFrom(const BSONObj& min) const {
+ return min;
+ }
- ChunkDiffUnitTest( bool inverse ) : _inverse( inverse ) {}
+ virtual bool isMinKeyIndexed() const {
+ return false;
+ }
- // The default pass-through adapter for using config diffs
- class DefaultDiffAdapter : public ConfigDiffTracker<BSONObj,string> {
- public:
+ virtual pair<BSONObj, BSONObj> rangeFor(const BSONObj& chunkDoc,
+ const BSONObj& min,
+ const BSONObj& max) const {
+ return make_pair(max, min);
+ }
+ };
- DefaultDiffAdapter() {}
- virtual ~DefaultDiffAdapter() {}
+ // Allow validating with and without ranges (b/c our splits won't actually be updated by the diffs)
+ void validate(BSONArray chunks, ChunkVersion maxVersion, const VersionMap& maxShardVersions) {
+ validate(chunks, NULL, maxVersion, maxShardVersions);
+ }
- virtual bool isTracked( const BSONObj& chunkDoc ) const { return true; }
- virtual BSONObj maxFrom( const BSONObj& max ) const { return max; }
+ void validate(BSONArray chunks,
+ const RangeMap& ranges,
+ ChunkVersion maxVersion,
+ const VersionMap& maxShardVersions) {
+ validate(chunks, (RangeMap*)&ranges, maxVersion, maxShardVersions);
+ }
- virtual pair<BSONObj,BSONObj> rangeFor( const BSONObj& chunkDoc, const BSONObj& min, const BSONObj& max ) const {
- return make_pair( min, max );
+ // Validates that the ranges and versions are valid given the chunks
+ void validate(const BSONArray& chunks,
+ RangeMap* ranges,
+ ChunkVersion maxVersion,
+ const VersionMap& maxShardVersions) {
+ BSONObjIterator it(chunks);
+ int chunkCount = 0;
+ ChunkVersion foundMaxVersion;
+ VersionMap foundMaxShardVersions;
+
+ //
+ // Validate that all the chunks are there and collect versions
+ //
+
+ while (it.more()) {
+ BSONObj chunkDoc = it.next().Obj();
+ chunkCount++;
+
+ if (ranges != NULL) {
+ // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs " << chunkCount << endl;
+
+ RangeMap::iterator chunkRange =
+ ranges->find(_inverse ? chunkDoc["max"].Obj() : chunkDoc["min"].Obj());
+
+ ASSERT(chunkRange != ranges->end());
+ ASSERT(chunkRange->second.woCompare(_inverse ? chunkDoc["min"].Obj()
+ : chunkDoc["max"].Obj()) == 0);
}
- virtual string shardFor( const string& name ) const { return name; }
- };
-
- // Inverts the storage order for chunks from min to max
- class InverseDiffAdapter : public DefaultDiffAdapter {
- public:
-
- InverseDiffAdapter() {}
- virtual ~InverseDiffAdapter() {}
-
- // Disable
- virtual BSONObj maxFrom( const BSONObj& max ) const { ASSERT( false ); return max; }
- virtual BSONObj minFrom( const BSONObj& min ) const { return min; }
+ ChunkVersion version =
+ ChunkVersion::fromBSON(chunkDoc[ChunkType::DEPRECATED_lastmod()]);
+ if (version > foundMaxVersion)
+ foundMaxVersion = version;
- virtual bool isMinKeyIndexed() const { return false; }
-
- virtual pair<BSONObj,BSONObj> rangeFor( const BSONObj& chunkDoc, const BSONObj& min, const BSONObj& max ) const {
- return make_pair( max, min );
+ ChunkVersion shardMaxVersion =
+ foundMaxShardVersions[chunkDoc[ChunkType::shard()].String()];
+ if (version > shardMaxVersion) {
+ foundMaxShardVersions[chunkDoc[ChunkType::shard()].String()] = version;
}
- };
-
- // Allow validating with and without ranges (b/c our splits won't actually be updated by the diffs)
- void validate( BSONArray chunks, ChunkVersion maxVersion, const VersionMap& maxShardVersions ){
- validate( chunks, NULL, maxVersion, maxShardVersions );
- }
-
- void validate( BSONArray chunks, const RangeMap& ranges, ChunkVersion maxVersion, const VersionMap& maxShardVersions ){
- validate( chunks, (RangeMap*)&ranges, maxVersion, maxShardVersions );
}
- // Validates that the ranges and versions are valid given the chunks
- void validate( const BSONArray& chunks, RangeMap* ranges, ChunkVersion maxVersion, const VersionMap& maxShardVersions ){
-
- BSONObjIterator it( chunks );
- int chunkCount = 0;
- ChunkVersion foundMaxVersion;
- VersionMap foundMaxShardVersions;
-
- //
- // Validate that all the chunks are there and collect versions
- //
-
- while( it.more() ){
-
- BSONObj chunkDoc = it.next().Obj();
- chunkCount++;
+ // Make sure all chunks are accounted for
+ if (ranges != NULL)
+ ASSERT(chunkCount == (int)ranges->size());
- if( ranges != NULL ){
+ // log() << "Validating that all shard versions are up to date..." << endl;
- // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs " << chunkCount << endl;
+ // Validate that all the versions are the same
+ ASSERT(foundMaxVersion.equals(maxVersion));
- RangeMap::iterator chunkRange = ranges->find( _inverse ? chunkDoc["max"].Obj() : chunkDoc["min"].Obj() );
+ for (VersionMap::iterator it = foundMaxShardVersions.begin();
+ it != foundMaxShardVersions.end();
+ it++) {
+ ChunkVersion foundVersion = it->second;
+ VersionMap::const_iterator maxIt = maxShardVersions.find(it->first);
- ASSERT( chunkRange != ranges->end() );
- ASSERT( chunkRange->second.woCompare( _inverse ? chunkDoc["min"].Obj() : chunkDoc["max"].Obj() ) == 0 );
- }
-
- ChunkVersion version =
- ChunkVersion::fromBSON(chunkDoc[ChunkType::DEPRECATED_lastmod()]);
- if( version > foundMaxVersion ) foundMaxVersion = version;
+ ASSERT(maxIt != maxShardVersions.end());
+ ASSERT(foundVersion.equals(maxIt->second));
+ }
+ // Make sure all shards are accounted for
+ ASSERT(foundMaxShardVersions.size() == maxShardVersions.size());
+ }
- ChunkVersion shardMaxVersion =
- foundMaxShardVersions[chunkDoc[ChunkType::shard()].String()];
- if( version > shardMaxVersion ) {
- foundMaxShardVersions[chunkDoc[ChunkType::shard()].String() ] = version;
- }
+ void run() {
+ int numShards = 10;
+ int numInitialChunks = 5;
+ int maxChunks = 100000; // Needed to not overflow the BSONArray's max bytes
+ int keySize = 2;
+
+ BSONArrayBuilder chunksB;
+
+ BSONObj lastSplitPt;
+ ChunkVersion version(1, 0, OID());
+
+ //
+ // Generate numChunks with a given key size over numShards
+ // All chunks have double key values, so we can split them a bunch
+ //
+
+ for (int i = -1; i < numInitialChunks; i++) {
+ BSONObjBuilder splitPtB;
+ for (int k = 0; k < keySize; k++) {
+ string field = string("k") + string(1, (char)('0' + k));
+ if (i < 0)
+ splitPtB.appendMinKey(field);
+ else if (i < numInitialChunks - 1)
+ splitPtB.append(field, (double)i);
+ else
+ splitPtB.appendMaxKey(field);
}
+ BSONObj splitPt = splitPtB.obj();
- // Make sure all chunks are accounted for
- if( ranges != NULL ) ASSERT( chunkCount == (int) ranges->size() );
-
- // log() << "Validating that all shard versions are up to date..." << endl;
+ if (i >= 0) {
+ BSONObjBuilder chunkB;
- // Validate that all the versions are the same
- ASSERT( foundMaxVersion.equals( maxVersion ) );
+ chunkB.append(ChunkType::min(), lastSplitPt);
+ chunkB.append(ChunkType::max(), splitPt);
- for( VersionMap::iterator it = foundMaxShardVersions.begin(); it != foundMaxShardVersions.end(); it++ ){
+ int shardNum = rand(numShards);
+ chunkB.append(ChunkType::shard(), "shard" + string(1, (char)('A' + shardNum)));
- ChunkVersion foundVersion = it->second;
- VersionMap::const_iterator maxIt = maxShardVersions.find( it->first );
+ rand(2) ? version.incMajor() : version.incMinor();
+ version.addToBSON(chunkB, ChunkType::DEPRECATED_lastmod());
- ASSERT( maxIt != maxShardVersions.end() );
- ASSERT( foundVersion.equals( maxIt->second ) );
+ chunksB.append(chunkB.obj());
}
- // Make sure all shards are accounted for
- ASSERT( foundMaxShardVersions.size() == maxShardVersions.size() );
- }
-
- void run() {
- int numShards = 10;
- int numInitialChunks = 5;
- int maxChunks = 100000; // Needed to not overflow the BSONArray's max bytes
- int keySize = 2;
+ lastSplitPt = splitPt;
+ }
- BSONArrayBuilder chunksB;
+ BSONArray chunks = chunksB.arr();
- BSONObj lastSplitPt;
- ChunkVersion version( 1, 0, OID() );
+ // log() << "Chunks generated : " << chunks << endl;
- //
- // Generate numChunks with a given key size over numShards
- // All chunks have double key values, so we can split them a bunch
- //
+ DBClientMockCursor chunksCursor(chunks);
- for( int i = -1; i < numInitialChunks; i++ ){
+ // Setup the empty ranges and versions first
+ RangeMap ranges;
+ ChunkVersion maxVersion = ChunkVersion(0, 0, OID());
+ VersionMap maxShardVersions;
- BSONObjBuilder splitPtB;
- for( int k = 0; k < keySize; k++ ){
- string field = string( "k" ) + string( 1, (char)('0' + k) );
- if( i < 0 )
- splitPtB.appendMinKey( field );
- else if( i < numInitialChunks - 1 )
- splitPtB.append( field, (double)i );
- else
- splitPtB.appendMaxKey( field );
- }
- BSONObj splitPt = splitPtB.obj();
+ // Create a differ which will track our progress
+ boost::shared_ptr<DefaultDiffAdapter> differ(_inverse ? new InverseDiffAdapter()
+ : new DefaultDiffAdapter());
+ differ->attach("test", ranges, maxVersion, maxShardVersions);
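+        // attach() hands the differ references to ranges, maxVersion, and maxShardVersions;
+        // each calculateConfigDiff() call below updates them in place, and validate()
+        // cross-checks them against the authoritative chunk array.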
- if( i >= 0 ){
- BSONObjBuilder chunkB;
+ // Validate initial load
+ differ->calculateConfigDiff(chunksCursor);
+ validate(chunks, ranges, maxVersion, maxShardVersions);
- chunkB.append(ChunkType::min(), lastSplitPt );
- chunkB.append(ChunkType::max(), splitPt );
+ // Generate a lot of diffs, and keep validating that updating from the diffs always
+ // gives us the right ranges and versions
- int shardNum = rand( numShards );
- chunkB.append(ChunkType::shard(),
- "shard" + string( 1, (char)('A' + shardNum) ) );
+ int numDiffs = 135; // Makes about 100000 chunks overall
+ int numChunks = numInitialChunks;
+ for (int i = 0; i < numDiffs; i++) {
+ // log() << "Generating new diff... " << i << endl;
- rand( 2 ) ? version.incMajor() : version.incMinor();
- version.addToBSON(chunkB, ChunkType::DEPRECATED_lastmod());
+ BSONArrayBuilder diffsB;
+ BSONArrayBuilder newChunksB;
+ BSONObjIterator chunksIt(chunks);
- chunksB.append( chunkB.obj() );
- }
+ while (chunksIt.more()) {
+ BSONObj chunk = chunksIt.next().Obj();
- lastSplitPt = splitPt;
- }
+ int randChoice = rand(10);
- BSONArray chunks = chunksB.arr();
+ if (randChoice < 2 && numChunks < maxChunks) {
+ // Simulate a split
- // log() << "Chunks generated : " << chunks << endl;
+ // log() << " ...starting a split with chunk " << chunk << endl;
- DBClientMockCursor chunksCursor( chunks );
+ BSONObjBuilder leftB;
+ BSONObjBuilder rightB;
+ BSONObjBuilder midB;
- // Setup the empty ranges and versions first
- RangeMap ranges;
- ChunkVersion maxVersion = ChunkVersion( 0, 0, OID() );
- VersionMap maxShardVersions;
+ for (int k = 0; k < keySize; k++) {
+ string field = string("k") + string(1, (char)('0' + k));
- // Create a differ which will track our progress
- boost::shared_ptr< DefaultDiffAdapter > differ( _inverse ? new InverseDiffAdapter() : new DefaultDiffAdapter() );
- differ->attach( "test", ranges, maxVersion, maxShardVersions );
+ BSONType maxType = chunk[ChunkType::max()].Obj()[field].type();
+ double max =
+ maxType == NumberDouble ? chunk["max"].Obj()[field].Number() : 0.0;
+ BSONType minType = chunk[ChunkType::min()].Obj()[field].type();
+ double min = minType == NumberDouble
+ ? chunk[ChunkType::min()].Obj()[field].Number()
+ : 0.0;
- // Validate initial load
- differ->calculateConfigDiff( chunksCursor );
- validate( chunks, ranges, maxVersion, maxShardVersions );
+ if (minType == MinKey) {
+ midB.append(field, max - 1.0);
+ } else if (maxType == MaxKey) {
+ midB.append(field, min + 1.0);
+ } else {
+ midB.append(field, (max + min) / 2.0);
+ }
+ }
- // Generate a lot of diffs, and keep validating that updating from the diffs always
- // gives us the right ranges and versions
+ BSONObj midPt = midB.obj();
+ // Only happens if we can't split the min chunk
+ if (midPt.isEmpty())
+ continue;
- int numDiffs = 135; // Makes about 100000 chunks overall
- int numChunks = numInitialChunks;
- for( int i = 0; i < numDiffs; i++ ){
+ leftB.append(chunk[ChunkType::min()]);
+ leftB.append(ChunkType::max(), midPt);
+ rightB.append(ChunkType::min(), midPt);
+ rightB.append(chunk[ChunkType::max()]);
- // log() << "Generating new diff... " << i << endl;
+ leftB.append(chunk[ChunkType::shard()]);
+ rightB.append(chunk[ChunkType::shard()]);
- BSONArrayBuilder diffsB;
- BSONArrayBuilder newChunksB;
- BSONObjIterator chunksIt( chunks );
+ version.incMajor();
+ version._minor = 0;
+ version.addToBSON(leftB, ChunkType::DEPRECATED_lastmod());
+ version.incMinor();
+ version.addToBSON(rightB, ChunkType::DEPRECATED_lastmod());
- while( chunksIt.more() ){
+ BSONObj left = leftB.obj();
+ BSONObj right = rightB.obj();
- BSONObj chunk = chunksIt.next().Obj();
+ // log() << " ... split into " << left << " and " << right << endl;
- int randChoice = rand( 10 );
+ newChunksB.append(left);
+ newChunksB.append(right);
- if( randChoice < 2 && numChunks < maxChunks ){
- // Simulate a split
+ diffsB.append(right);
+ diffsB.append(left);
- // log() << " ...starting a split with chunk " << chunk << endl;
+ numChunks++;
+ } else if (randChoice < 4 && chunksIt.more()) {
+ // Simulate a migrate
- BSONObjBuilder leftB;
- BSONObjBuilder rightB;
- BSONObjBuilder midB;
+ // log() << " ...starting a migrate with chunk " << chunk << endl;
- for( int k = 0; k < keySize; k++ ){
- string field = string( "k" ) + string( 1, (char)('0' + k) );
+ BSONObj prevShardChunk;
+ while (chunksIt.more()) {
+ prevShardChunk = chunksIt.next().Obj();
+ if (prevShardChunk[ChunkType::shard()].String() ==
+ chunk[ChunkType::shard()].String())
+ break;
- BSONType maxType = chunk[ChunkType::max()].Obj()[field].type();
- double max = maxType == NumberDouble ? chunk["max"].Obj()[field].Number() : 0.0;
- BSONType minType = chunk[ChunkType::min()].Obj()[field].type();
- double min = minType == NumberDouble ?
- chunk[ChunkType::min()].Obj()[field].Number() :
- 0.0;
+ // log() << "... appending chunk from diff shard: " << prevShardChunk << endl;
+ newChunksB.append(prevShardChunk);
- if( minType == MinKey ){
- midB.append( field, max - 1.0 );
- }
- else if( maxType == MaxKey ){
- midB.append( field, min + 1.0 );
- }
- else {
- midB.append( field, ( max + min ) / 2.0 );
- }
- }
+ prevShardChunk = BSONObj();
+ }
- BSONObj midPt = midB.obj();
- // Only happens if we can't split the min chunk
- if( midPt.isEmpty() ) continue;
+ // We need to move between different shards, hence the weirdness in logic here
+ if (!prevShardChunk.isEmpty()) {
+ BSONObjBuilder newShardB;
+ BSONObjBuilder prevShardB;
- leftB.append( chunk[ChunkType::min()] );
- leftB.append(ChunkType::max(), midPt );
- rightB.append(ChunkType::min(), midPt );
- rightB.append(chunk[ChunkType::max()] );
+ newShardB.append(chunk[ChunkType::min()]);
+ newShardB.append(chunk[ChunkType::max()]);
+ prevShardB.append(prevShardChunk[ChunkType::min()]);
+ prevShardB.append(prevShardChunk[ChunkType::max()]);
- leftB.append(chunk[ChunkType::shard()] );
- rightB.append(chunk[ChunkType::shard()] );
+ int shardNum = rand(numShards);
+ newShardB.append(ChunkType::shard(),
+ "shard" + string(1, (char)('A' + shardNum)));
+ prevShardB.append(prevShardChunk[ChunkType::shard()]);
version.incMajor();
version._minor = 0;
- version.addToBSON(leftB, ChunkType::DEPRECATED_lastmod());
+ version.addToBSON(newShardB, ChunkType::DEPRECATED_lastmod());
version.incMinor();
- version.addToBSON(rightB, ChunkType::DEPRECATED_lastmod());
-
- BSONObj left = leftB.obj();
- BSONObj right = rightB.obj();
-
- // log() << " ... split into " << left << " and " << right << endl;
-
- newChunksB.append( left );
- newChunksB.append( right );
-
- diffsB.append( right );
- diffsB.append( left );
-
- numChunks++;
- }
- else if( randChoice < 4 && chunksIt.more() ){
- // Simulate a migrate
-
- // log() << " ...starting a migrate with chunk " << chunk << endl;
-
- BSONObj prevShardChunk;
- while( chunksIt.more() ){
- prevShardChunk = chunksIt.next().Obj();
- if( prevShardChunk[ChunkType::shard()].String() ==
- chunk[ChunkType::shard()].String() ) break;
+ version.addToBSON(prevShardB, ChunkType::DEPRECATED_lastmod());
- // log() << "... appending chunk from diff shard: " << prevShardChunk << endl;
- newChunksB.append( prevShardChunk );
+ BSONObj newShard = newShardB.obj();
+ BSONObj prevShard = prevShardB.obj();
- prevShardChunk = BSONObj();
- }
-
- // We need to move between different shards, hence the weirdness in logic here
- if( ! prevShardChunk.isEmpty() ){
-
- BSONObjBuilder newShardB;
- BSONObjBuilder prevShardB;
-
- newShardB.append(chunk[ChunkType::min()]);
- newShardB.append(chunk[ChunkType::max()]);
- prevShardB.append(prevShardChunk[ChunkType::min()]);
- prevShardB.append(prevShardChunk[ChunkType::max()]);
-
- int shardNum = rand( numShards );
- newShardB.append(ChunkType::shard(),
- "shard" + string( 1, (char)('A' + shardNum)));
- prevShardB.append(prevShardChunk[ChunkType::shard()]);
-
- version.incMajor();
- version._minor = 0;
- version.addToBSON(newShardB, ChunkType::DEPRECATED_lastmod());
- version.incMinor();
- version.addToBSON(prevShardB, ChunkType::DEPRECATED_lastmod());
-
- BSONObj newShard = newShardB.obj();
- BSONObj prevShard = prevShardB.obj();
-
- // log() << " ... migrated to " << newShard << " and updated " << prevShard << endl;
+ // log() << " ... migrated to " << newShard << " and updated " << prevShard << endl;
- newChunksB.append( newShard );
- newChunksB.append( prevShard );
+ newChunksB.append(newShard);
+ newChunksB.append(prevShard);
- diffsB.append( newShard );
- diffsB.append( prevShard );
+ diffsB.append(newShard);
+ diffsB.append(prevShard);
- }
- else{
- // log() << "... appending chunk, no more left: " << chunk << endl;
- newChunksB.append( chunk );
- }
+ } else {
+ // log() << "... appending chunk, no more left: " << chunk << endl;
+ newChunksB.append(chunk);
}
- else{
- // log() << "Appending chunk : " << chunk << endl;
- newChunksB.append( chunk );
- }
-
- }
-
- BSONArray diffs = diffsB.arr();
- chunks = newChunksB.arr();
-
- // log() << "Diffs generated : " << diffs << endl;
- // log() << "All chunks : " << chunks << endl;
-
- // Rarely entirely clear out our data
- if( rand( 10 ) < 1 ){
- diffs = chunks;
- ranges.clear();
- maxVersion = ChunkVersion( 0, 0, OID() );
- maxShardVersions.clear();
+ } else {
+ // log() << "Appending chunk : " << chunk << endl;
+ newChunksB.append(chunk);
}
+ }
- // log() << "Total number of chunks : " << numChunks << " iteration " << i << endl;
-
- DBClientMockCursor diffCursor( diffs );
-
- differ->calculateConfigDiff( diffCursor );
+ BSONArray diffs = diffsB.arr();
+ chunks = newChunksB.arr();
- validate( chunks, ranges, maxVersion, maxShardVersions );
+ // log() << "Diffs generated : " << diffs << endl;
+ // log() << "All chunks : " << chunks << endl;
+ // Rarely entirely clear out our data
+ if (rand(10) < 1) {
+ diffs = chunks;
+ ranges.clear();
+ maxVersion = ChunkVersion(0, 0, OID());
+ maxShardVersions.clear();
}
- }
- };
-
- class ChunkDiffUnitTestNormal : public ChunkDiffUnitTest {
- public:
- ChunkDiffUnitTestNormal() : ChunkDiffUnitTest( false ) {}
- };
+ // log() << "Total number of chunks : " << numChunks << " iteration " << i << endl;
- class ChunkDiffUnitTestInverse : public ChunkDiffUnitTest {
- public:
- ChunkDiffUnitTestInverse() : ChunkDiffUnitTest( true ) {}
- };
+ DBClientMockCursor diffCursor(diffs);
- class All : public Suite {
- public:
- All() : Suite( "sharding" ) {
- }
+ differ->calculateConfigDiff(diffCursor);
- void setupTests() {
- add< serverandquerytests::test1 >();
- add< ChunkManagerCreateBasicTest >();
- add< ChunkManagerCreateFullTest >();
- add< ChunkManagerLoadBasicTest >();
- add< ChunkDiffUnitTestNormal >();
- add< ChunkDiffUnitTestInverse >();
+ validate(chunks, ranges, maxVersion, maxShardVersions);
}
- };
-
- SuiteInstance<All> myall;
+ }
+};
+
+class ChunkDiffUnitTestNormal : public ChunkDiffUnitTest {
+public:
+ ChunkDiffUnitTestNormal() : ChunkDiffUnitTest(false) {}
+};
+
+class ChunkDiffUnitTestInverse : public ChunkDiffUnitTest {
+public:
+ ChunkDiffUnitTestInverse() : ChunkDiffUnitTest(true) {}
+};
+
+class All : public Suite {
+public:
+ All() : Suite("sharding") {}
+
+ void setupTests() {
+ add<serverandquerytests::test1>();
+ add<ChunkManagerCreateBasicTest>();
+ add<ChunkManagerCreateFullTest>();
+ add<ChunkManagerLoadBasicTest>();
+ add<ChunkDiffUnitTestNormal>();
+ add<ChunkDiffUnitTestInverse>();
+ }
+};
+SuiteInstance<All> myall;
}
diff --git a/src/mongo/dbtests/socktests.cpp b/src/mongo/dbtests/socktests.cpp
index a4563d47ed8..baa633e7e3c 100644
--- a/src/mongo/dbtests/socktests.cpp
+++ b/src/mongo/dbtests/socktests.cpp
@@ -37,28 +37,27 @@
namespace SockTests {
- class HostByName {
- public:
- void run() {
- ASSERT_EQUALS( "127.0.0.1", hostbyname( "localhost" ) );
- ASSERT_EQUALS( "127.0.0.1", hostbyname( "127.0.0.1" ) );
- // ASSERT_EQUALS( "::1", hostbyname( "::1" ) ); // IPv6 disabled at runtime by default.
-
- HostAndPort h("asdfasdfasdf_no_such_host");
- // this fails uncomment when fixed.
- ASSERT(!mongo::repl::isSelf(h));
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "sock" ) {}
- void setupTests() {
- add< HostByName >();
- }
- };
-
- SuiteInstance<All> myall;
-
-} // namespace SockTests
-
+class HostByName {
+public:
+ void run() {
+ ASSERT_EQUALS("127.0.0.1", hostbyname("localhost"));
+ ASSERT_EQUALS("127.0.0.1", hostbyname("127.0.0.1"));
+ // ASSERT_EQUALS( "::1", hostbyname( "::1" ) ); // IPv6 disabled at runtime by default.
+
+ HostAndPort h("asdfasdfasdf_no_such_host");
+        // This fails; uncomment when fixed.
+ ASSERT(!mongo::repl::isSelf(h));
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("sock") {}
+ void setupTests() {
+ add<HostByName>();
+ }
+};
+
+SuiteInstance<All> myall;
+
+} // namespace SockTests
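
For context, hostbyname() is a thin wrapper over OS name resolution, which is why the test pins
"localhost" to an IPv4 answer. A minimal standalone analogue using POSIX getaddrinfo() (a sketch;
resolveFirstIPv4 is a hypothetical helper, not the MongoDB function):

    #include <arpa/inet.h>
    #include <netdb.h>
    #include <netinet/in.h>
    #include <string>

    // Resolve a hostname to its first IPv4 address, or "" if resolution fails.
    std::string resolveFirstIPv4(const std::string& host) {
        addrinfo hints = {};
        hints.ai_family = AF_INET;  // IPv4 only, matching the test's expectations
        addrinfo* res = nullptr;
        if (getaddrinfo(host.c_str(), nullptr, &hints, &res) != 0)
            return "";  // e.g. for "asdfasdfasdf_no_such_host"
        char buf[INET_ADDRSTRLEN];
        const sockaddr_in* sa = reinterpret_cast<const sockaddr_in*>(res->ai_addr);
        inet_ntop(AF_INET, &sa->sin_addr, buf, sizeof(buf));
        freeaddrinfo(res);
        return buf;
    }
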
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 62d8465e940..5e1c5d7945d 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -53,798 +53,771 @@
namespace ThreadedTests {
- using std::auto_ptr;
- using std::cout;
- using std::endl;
- using std::string;
-
- template <int nthreads_param=10>
- class ThreadedTest {
- public:
- virtual void setup() {} //optional
- virtual void subthread(int remaining) = 0; // each thread whatever test work you want done
- virtual void validate() = 0; // after work is done
-
- static const int nthreads = nthreads_param;
-
- void run() {
- setup();
- launch_subthreads(nthreads);
- validate();
- }
+using std::auto_ptr;
+using std::cout;
+using std::endl;
+using std::string;
+
+template <int nthreads_param = 10>
+class ThreadedTest {
+public:
+ virtual void setup() {} // optional
+    virtual void subthread(int remaining) = 0; // each thread does whatever test work you want done
+ virtual void validate() = 0; // after work is done
+
+ static const int nthreads = nthreads_param;
+
+ void run() {
+ setup();
+ launch_subthreads(nthreads);
+ validate();
+ }
- virtual ~ThreadedTest() {}; // not necessary, but makes compilers happy
+    virtual ~ThreadedTest() {} // not necessary, but makes compilers happy
- private:
- void launch_subthreads(int remaining) {
- if (!remaining)
- return;
+private:
+ void launch_subthreads(int remaining) {
+ if (!remaining)
+ return;
- boost::thread athread(stdx::bind(&ThreadedTest::subthread, this, remaining));
- launch_subthreads(remaining - 1);
- athread.join();
- }
- };
+ boost::thread athread(stdx::bind(&ThreadedTest::subthread, this, remaining));
+ launch_subthreads(remaining - 1);
+ athread.join();
+ }
+};
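
Every test below follows the ThreadedTest protocol just defined: setup() once, nthreads concurrent
subthread() calls, then validate() on the results. A minimal derived test, as a sketch (CounterTest
is hypothetical, but it uses only the class shape shown above):

    class CounterTest : public ThreadedTest<4> {
        AtomicUInt32 _counter;
        virtual void subthread(int) {
            _counter.fetchAndAdd(1);  // each of the 4 threads bumps the counter once
        }
        virtual void validate() {
            ASSERT_EQUALS(4u, _counter.load());
        }
    };
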
#ifdef MONGO_PLATFORM_32
- // Avoid OOM on Linux-32 by using fewer threads
- const int nthr=45;
+// Avoid OOM on Linux-32 by using fewer threads
+const int nthr = 45;
#else
- const int nthr=135;
+const int nthr = 135;
#endif
- class MongoMutexTest : public ThreadedTest<nthr> {
+class MongoMutexTest : public ThreadedTest<nthr> {
#if defined(_DEBUG)
- enum { N = 2000 };
+ enum { N = 2000 };
#else
- enum { N = 4000/*0*/ };
+ enum { N = 4000 /*0*/ };
#endif
- ProgressMeter pm;
-
- public:
- MongoMutexTest() : pm(N * nthreads) {
-
- }
-
- void run() {
- Timer t;
- cout << "MongoMutexTest N:" << N << endl;
- ThreadedTest<nthr>::run();
- cout << "MongoMutexTest " << t.millis() << "ms" << endl;
- }
+ ProgressMeter pm;
- private:
+public:
+ MongoMutexTest() : pm(N * nthreads) {}
- virtual void subthread(int tnumber) {
- Client::initThread("mongomutextest");
-
- OperationContextImpl txn;
+ void run() {
+ Timer t;
+ cout << "MongoMutexTest N:" << N << endl;
+ ThreadedTest<nthr>::run();
+ cout << "MongoMutexTest " << t.millis() << "ms" << endl;
+ }
- sleepmillis(0);
- for( int i = 0; i < N; i++ ) {
- int x = std::rand();
- bool sometimes = (x % 15 == 0);
- if( i % 7 == 0 ) {
- Lock::GlobalRead r(txn.lockState()); // nested test
- Lock::GlobalRead r2(txn.lockState());
+private:
+ virtual void subthread(int tnumber) {
+ Client::initThread("mongomutextest");
+
+ OperationContextImpl txn;
+
+ sleepmillis(0);
+ for (int i = 0; i < N; i++) {
+ int x = std::rand();
+ bool sometimes = (x % 15 == 0);
+ if (i % 7 == 0) {
+ Lock::GlobalRead r(txn.lockState()); // nested test
+ Lock::GlobalRead r2(txn.lockState());
+ } else if (i % 7 == 1) {
+ Lock::GlobalRead r(txn.lockState());
+ ASSERT(txn.lockState()->isReadLocked());
+ } else if (i % 7 == 4 && tnumber == 1 /*only one upgrader legal*/) {
+ Lock::GlobalWrite w(txn.lockState());
+ ASSERT(txn.lockState()->isW());
+ if (i % 7 == 2) {
+ Lock::TempRelease t(txn.lockState());
}
- else if( i % 7 == 1 ) {
- Lock::GlobalRead r(txn.lockState());
- ASSERT(txn.lockState()->isReadLocked());
+ } else if (i % 7 == 2) {
+ Lock::GlobalWrite w(txn.lockState());
+ ASSERT(txn.lockState()->isW());
+ if (sometimes) {
+ Lock::TempRelease t(txn.lockState());
}
- else if( i % 7 == 4 &&
- tnumber == 1 /*only one upgrader legal*/ ) {
- Lock::GlobalWrite w(txn.lockState());
- ASSERT( txn.lockState()->isW() );
- if( i % 7 == 2 ) {
- Lock::TempRelease t(txn.lockState());
- }
+ } else if (i % 7 == 3) {
+ Lock::GlobalWrite w(txn.lockState());
+ { Lock::TempRelease t(txn.lockState()); }
+ Lock::GlobalRead r(txn.lockState());
+ ASSERT(txn.lockState()->isW());
+ if (sometimes) {
+ Lock::TempRelease t(txn.lockState());
}
- else if( i % 7 == 2 ) {
- Lock::GlobalWrite w(txn.lockState());
- ASSERT( txn.lockState()->isW() );
- if( sometimes ) {
- Lock::TempRelease t(txn.lockState());
- }
+ } else if (i % 7 == 5) {
+ {
+ ScopedTransaction scopedXact(&txn, MODE_IS);
+ Lock::DBLock r(txn.lockState(), "foo", MODE_S);
}
- else if( i % 7 == 3 ) {
- Lock::GlobalWrite w(txn.lockState());
- {
- Lock::TempRelease t(txn.lockState());
- }
- Lock::GlobalRead r(txn.lockState());
- ASSERT( txn.lockState()->isW() );
- if( sometimes ) {
- Lock::TempRelease t(txn.lockState());
- }
+ {
+ ScopedTransaction scopedXact(&txn, MODE_IS);
+ Lock::DBLock r(txn.lockState(), "bar", MODE_S);
}
- else if( i % 7 == 5 ) {
- {
- ScopedTransaction scopedXact(&txn, MODE_IS);
- Lock::DBLock r(txn.lockState(), "foo", MODE_S);
- }
- {
+ } else if (i % 7 == 6) {
+ if (i > N / 2) {
+ int q = i % 11;
+ if (q == 0) {
ScopedTransaction scopedXact(&txn, MODE_IS);
- Lock::DBLock r(txn.lockState(), "bar", MODE_S);
- }
- }
- else if( i % 7 == 6 ) {
- if( i > N/2 ) {
- int q = i % 11;
- if( q == 0 ) {
- ScopedTransaction scopedXact(&txn, MODE_IS);
- Lock::DBLock r(txn.lockState(), "foo", MODE_S);
- ASSERT(txn.lockState()->isDbLockedForMode("foo", MODE_S));
-
- Lock::DBLock r2(txn.lockState(), "foo", MODE_S);
- ASSERT(txn.lockState()->isDbLockedForMode("foo", MODE_S));
+ Lock::DBLock r(txn.lockState(), "foo", MODE_S);
+ ASSERT(txn.lockState()->isDbLockedForMode("foo", MODE_S));
- Lock::DBLock r3(txn.lockState(), "local", MODE_S);
- ASSERT(txn.lockState()->isDbLockedForMode("foo", MODE_S));
- ASSERT(txn.lockState()->isDbLockedForMode("local", MODE_S));
- }
- else if( q == 1 ) {
- // test locking local only -- with no preceding lock
- {
- ScopedTransaction scopedXact(&txn, MODE_IS);
- Lock::DBLock x(txn.lockState(), "local", MODE_S);
- }
- {
- ScopedTransaction scopedXact(&txn, MODE_IX);
- Lock::DBLock x(txn.lockState(), "local", MODE_X);
-
- // No actual writing here, so no WriteUnitOfWork
- if( sometimes ) {
- Lock::TempRelease t(txn.lockState());
- }
- }
- } else if( q == 1 ) {
- {
- ScopedTransaction scopedXact(&txn, MODE_IS);
- Lock::DBLock x(txn.lockState(), "admin", MODE_S);
- }
+ Lock::DBLock r2(txn.lockState(), "foo", MODE_S);
+ ASSERT(txn.lockState()->isDbLockedForMode("foo", MODE_S));
- {
- ScopedTransaction scopedXact(&txn, MODE_IX);
- Lock::DBLock x(txn.lockState(), "admin", MODE_X);
- }
+ Lock::DBLock r3(txn.lockState(), "local", MODE_S);
+ ASSERT(txn.lockState()->isDbLockedForMode("foo", MODE_S));
+ ASSERT(txn.lockState()->isDbLockedForMode("local", MODE_S));
+ } else if (q == 1) {
+ // test locking local only -- with no preceding lock
+ {
+ ScopedTransaction scopedXact(&txn, MODE_IS);
+ Lock::DBLock x(txn.lockState(), "local", MODE_S);
}
- else if( q == 3 ) {
+ {
ScopedTransaction scopedXact(&txn, MODE_IX);
+ Lock::DBLock x(txn.lockState(), "local", MODE_X);
- Lock::DBLock x(txn.lockState(), "foo", MODE_X);
- Lock::DBLock y(txn.lockState(), "admin", MODE_S);
+ // No actual writing here, so no WriteUnitOfWork
+ if (sometimes) {
+ Lock::TempRelease t(txn.lockState());
+ }
}
- else if( q == 4 ) {
+ } else if (q == 1) {
+ {
ScopedTransaction scopedXact(&txn, MODE_IS);
-
- Lock::DBLock x(txn.lockState(), "foo2", MODE_S);
- Lock::DBLock y(txn.lockState(), "admin", MODE_S);
+ Lock::DBLock x(txn.lockState(), "admin", MODE_S);
}
- else {
+
+ {
ScopedTransaction scopedXact(&txn, MODE_IX);
+ Lock::DBLock x(txn.lockState(), "admin", MODE_X);
+ }
+ } else if (q == 3) {
+ ScopedTransaction scopedXact(&txn, MODE_IX);
- Lock::DBLock w(txn.lockState(), "foo", MODE_X);
+ Lock::DBLock x(txn.lockState(), "foo", MODE_X);
+ Lock::DBLock y(txn.lockState(), "admin", MODE_S);
+ } else if (q == 4) {
+ ScopedTransaction scopedXact(&txn, MODE_IS);
- {
- Lock::TempRelease t(txn.lockState());
- }
+ Lock::DBLock x(txn.lockState(), "foo2", MODE_S);
+ Lock::DBLock y(txn.lockState(), "admin", MODE_S);
+ } else {
+ ScopedTransaction scopedXact(&txn, MODE_IX);
- Lock::DBLock r2(txn.lockState(), "foo", MODE_S);
- Lock::DBLock r3(txn.lockState(), "local", MODE_S);
- }
- }
- else {
- ScopedTransaction scopedXact(&txn, MODE_IS);
+ Lock::DBLock w(txn.lockState(), "foo", MODE_X);
+
+ { Lock::TempRelease t(txn.lockState()); }
- Lock::DBLock r(txn.lockState(), "foo", MODE_S);
Lock::DBLock r2(txn.lockState(), "foo", MODE_S);
Lock::DBLock r3(txn.lockState(), "local", MODE_S);
}
+ } else {
+ ScopedTransaction scopedXact(&txn, MODE_IS);
+
+ Lock::DBLock r(txn.lockState(), "foo", MODE_S);
+ Lock::DBLock r2(txn.lockState(), "foo", MODE_S);
+ Lock::DBLock r3(txn.lockState(), "local", MODE_S);
}
- pm.hit();
}
- cc().shutdown();
+ pm.hit();
}
+ cc().shutdown();
+ }
- virtual void validate() {
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite w(&ls);
- }
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead r(&ls);
- }
+ virtual void validate() {
+ {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite w(&ls);
}
- };
+ {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalRead r(&ls);
+ }
+ }
+};
- template <typename _AtomicUInt>
- class IsAtomicWordAtomic : public ThreadedTest<> {
- static const int iterations = 1000000;
- typedef typename _AtomicUInt::WordType WordType;
- _AtomicUInt target;
+template <typename _AtomicUInt>
+class IsAtomicWordAtomic : public ThreadedTest<> {
+ static const int iterations = 1000000;
+ typedef typename _AtomicUInt::WordType WordType;
+ _AtomicUInt target;
- void subthread(int) {
- for(int i=0; i < iterations; i++) {
- target.fetchAndAdd(WordType(1));
- }
+ void subthread(int) {
+ for (int i = 0; i < iterations; i++) {
+ target.fetchAndAdd(WordType(1));
}
- void validate() {
- ASSERT_EQUALS(target.load() , unsigned(nthreads * iterations));
-
- _AtomicUInt u;
- ASSERT_EQUALS(0u, u.load());
- ASSERT_EQUALS(0u, u.fetchAndAdd(WordType(1)));
- ASSERT_EQUALS(2u, u.addAndFetch(WordType(1)));
- ASSERT_EQUALS(2u, u.fetchAndSubtract(WordType(1)));
- ASSERT_EQUALS(0u, u.subtractAndFetch(WordType(1)));
- ASSERT_EQUALS(0u, u.load());
-
- u.fetchAndAdd(WordType(1));
- ASSERT_GREATER_THAN(u.load(), WordType(0));
-
- u.fetchAndSubtract(WordType(1));
- ASSERT_NOT_GREATER_THAN(u.load(), WordType(0));
- }
- };
+ }
+ void validate() {
+ ASSERT_EQUALS(target.load(), unsigned(nthreads * iterations));
+
+ _AtomicUInt u;
+ ASSERT_EQUALS(0u, u.load());
+ ASSERT_EQUALS(0u, u.fetchAndAdd(WordType(1)));
+ ASSERT_EQUALS(2u, u.addAndFetch(WordType(1)));
+ ASSERT_EQUALS(2u, u.fetchAndSubtract(WordType(1)));
+ ASSERT_EQUALS(0u, u.subtractAndFetch(WordType(1)));
+ ASSERT_EQUALS(0u, u.load());
+
+ u.fetchAndAdd(WordType(1));
+ ASSERT_GREATER_THAN(u.load(), WordType(0));
+
+ u.fetchAndSubtract(WordType(1));
+ ASSERT_NOT_GREATER_THAN(u.load(), WordType(0));
+ }
+};
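
The asserts above hinge on the fetchAnd*/​*AndFetch distinction: fetchAndAdd returns the value
before the addition, while addAndFetch returns the value after it. The same contract in standard
C++, as a self-contained sketch:

    #include <atomic>
    #include <cassert>

    int main() {
        std::atomic<unsigned> u{0};
        assert(u.fetch_add(1) == 0);  // returns the old value...
        assert(u.load() == 1);        // ...but the increment did happen
        assert(u.fetch_sub(1) == 1);  // likewise for subtraction
        assert(u.load() == 0);
    }
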
- class MVarTest : public ThreadedTest<> {
- static const int iterations = 10000;
- MVar<int> target;
+class MVarTest : public ThreadedTest<> {
+ static const int iterations = 10000;
+ MVar<int> target;
- public:
- MVarTest() : target(0) {}
- void subthread(int) {
- for(int i=0; i < iterations; i++) {
- int val = target.take();
+public:
+ MVarTest() : target(0) {}
+ void subthread(int) {
+ for (int i = 0; i < iterations; i++) {
+ int val = target.take();
#if BOOST_VERSION >= 103500
- //increase chances of catching failure
- boost::this_thread::yield();
+ // increase chances of catching failure
+ boost::this_thread::yield();
#endif
- target.put(val+1);
- }
+ target.put(val + 1);
}
- void validate() {
- ASSERT_EQUALS(target.take() , nthreads * iterations);
- }
- };
+ }
+ void validate() {
+ ASSERT_EQUALS(target.take(), nthreads * iterations);
+ }
+};
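
MVar<int> behaves like a one-slot synchronized box: take() blocks until a value is present and
empties the slot, while put() blocks until the slot is empty. A minimal standard-C++ analogue of
those semantics (illustrative only, not MongoDB's implementation):

    #include <condition_variable>
    #include <mutex>
    #include <optional>

    template <typename T>
    class MiniMVar {
        std::mutex _m;
        std::condition_variable _cv;
        std::optional<T> _slot;

    public:
        explicit MiniMVar(T v) : _slot(std::move(v)) {}
        T take() {  // blocks until full, leaves the slot empty
            std::unique_lock<std::mutex> lk(_m);
            _cv.wait(lk, [&] { return _slot.has_value(); });
            T v = std::move(*_slot);
            _slot.reset();
            _cv.notify_all();
            return v;
        }
        void put(T v) {  // blocks until empty, fills the slot
            std::unique_lock<std::mutex> lk(_m);
            _cv.wait(lk, [&] { return !_slot.has_value(); });
            _slot = std::move(v);
            _cv.notify_all();
        }
    };

Because take() empties the slot, concurrent incrementers serialize on the box even with the yield
in the middle, so the final take() in validate() observes exactly nthreads * iterations.
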
- class ThreadPoolTest {
- static const unsigned iterations = 10000;
- static const unsigned nThreads = 8;
+class ThreadPoolTest {
+ static const unsigned iterations = 10000;
+ static const unsigned nThreads = 8;
- AtomicUInt32 counter;
- void increment(unsigned n) {
- for (unsigned i=0; i<n; i++) {
- counter.fetchAndAdd(1);
- }
+ AtomicUInt32 counter;
+ void increment(unsigned n) {
+ for (unsigned i = 0; i < n; i++) {
+ counter.fetchAndAdd(1);
}
+ }
- public:
- void run() {
- ThreadPool tp(nThreads);
+public:
+ void run() {
+ ThreadPool tp(nThreads);
- for (unsigned i=0; i < iterations; i++) {
- tp.schedule(&ThreadPoolTest::increment, this, 2);
- }
+ for (unsigned i = 0; i < iterations; i++) {
+ tp.schedule(&ThreadPoolTest::increment, this, 2);
+ }
- tp.join();
+ tp.join();
- ASSERT_EQUALS(counter.load(), iterations * 2);
- }
- };
+ ASSERT_EQUALS(counter.load(), iterations * 2);
+ }
+};
- class RWLockTest1 {
- public:
- void run() {
- RWLock lk( "eliot" );
- {
- rwlock r( lk , true , 1000 );
- }
+class RWLockTest1 {
+public:
+ void run() {
+ RWLock lk("eliot");
+ { rwlock r(lk, true, 1000); }
+ }
+};
+
+class RWLockTest2 {
+public:
+ static void worker1(RWLockRecursiveNongreedy* lk, AtomicUInt32* x) {
+ x->fetchAndAdd(1); // 1
+ RWLockRecursiveNongreedy::Exclusive b(*lk);
+ x->fetchAndAdd(1); // 2
+ }
+ static void worker2(RWLockRecursiveNongreedy* lk, AtomicUInt32* x) {
+ RWLockRecursiveNongreedy::Shared c(*lk);
+ x->fetchAndAdd(1);
+ }
+ void run() {
+ /**
+ * note: this test will deadlock if the code breaks
+ */
+ RWLockRecursiveNongreedy lk("eliot2", 120 * 1000);
+ cout << "RWLock impl: " << lk.implType() << endl;
+ auto_ptr<RWLockRecursiveNongreedy::Shared> a(new RWLockRecursiveNongreedy::Shared(lk));
+ AtomicUInt32 x1(0);
+ cout << "A : " << &x1 << endl;
+ boost::thread t1(stdx::bind(worker1, &lk, &x1));
+ while (!x1.load())
+ ;
+ verify(x1.load() == 1);
+ sleepmillis(500);
+ verify(x1.load() == 1);
+ AtomicUInt32 x2(0);
+ boost::thread t2(stdx::bind(worker2, &lk, &x2));
+ t2.join();
+ verify(x2.load() == 1);
+ a.reset();
+ for (int i = 0; i < 2000; i++) {
+ if (x1.load() == 2)
+ break;
+ sleepmillis(1);
}
- };
+ verify(x1.load() == 2);
+ t1.join();
+ }
+};
+
+class RWLockTest3 {
+public:
+ static void worker2(RWLockRecursiveNongreedy* lk, AtomicUInt32* x) {
+ verify(!lk->__lock_try(0));
+ RWLockRecursiveNongreedy::Shared c(*lk);
+ x->fetchAndAdd(1);
+ }
- class RWLockTest2 {
- public:
- static void worker1( RWLockRecursiveNongreedy * lk , AtomicUInt32 * x ) {
- x->fetchAndAdd(1); // 1
- RWLockRecursiveNongreedy::Exclusive b(*lk);
- x->fetchAndAdd(1); // 2
- }
- static void worker2( RWLockRecursiveNongreedy * lk , AtomicUInt32 * x ) {
- RWLockRecursiveNongreedy::Shared c(*lk);
- x->fetchAndAdd(1);
- }
- void run() {
- /**
- * note: this test will deadlock if the code breaks
- */
- RWLockRecursiveNongreedy lk( "eliot2" , 120 * 1000 );
- cout << "RWLock impl: " << lk.implType() << endl;
- auto_ptr<RWLockRecursiveNongreedy::Shared> a( new RWLockRecursiveNongreedy::Shared(lk) );
- AtomicUInt32 x1(0);
- cout << "A : " << &x1 << endl;
- boost::thread t1( stdx::bind( worker1 , &lk , &x1 ) );
- while ( ! x1.load() );
- verify( x1.load() == 1 );
- sleepmillis( 500 );
- verify( x1.load() == 1 );
- AtomicUInt32 x2(0);
- boost::thread t2( stdx::bind( worker2, &lk , &x2 ) );
- t2.join();
- verify( x2.load() == 1 );
- a.reset();
- for ( int i=0; i<2000; i++ ) {
- if ( x1.load() == 2 )
- break;
- sleepmillis(1);
- }
- verify( x1.load() == 2 );
- t1.join();
- }
- };
+ void run() {
+ /**
+ * note: this test will deadlock if the code breaks
+ */
- class RWLockTest3 {
- public:
- static void worker2( RWLockRecursiveNongreedy * lk , AtomicUInt32 * x ) {
- verify( ! lk->__lock_try(0) );
- RWLockRecursiveNongreedy::Shared c( *lk );
- x->fetchAndAdd(1);
- }
+ RWLockRecursiveNongreedy lk("eliot2", 120 * 1000);
- void run() {
- /**
- * note: this test will deadlock if the code breaks
- */
-
- RWLockRecursiveNongreedy lk( "eliot2" , 120 * 1000 );
-
- auto_ptr<RWLockRecursiveNongreedy::Shared> a( new RWLockRecursiveNongreedy::Shared( lk ) );
-
- AtomicUInt32 x2(0);
-
- boost::thread t2( stdx::bind( worker2, &lk , &x2 ) );
- t2.join();
- verify( x2.load() == 1 );
-
- a.reset();
- }
- };
+ auto_ptr<RWLockRecursiveNongreedy::Shared> a(new RWLockRecursiveNongreedy::Shared(lk));
- class RWLockTest4 {
- public:
-
+ AtomicUInt32 x2(0);
+
+ boost::thread t2(stdx::bind(worker2, &lk, &x2));
+ t2.join();
+ verify(x2.load() == 1);
+
+ a.reset();
+ }
+};
+
+class RWLockTest4 {
+public:
#if defined(__linux__) || defined(__APPLE__)
- static void worker1( pthread_rwlock_t * lk , AtomicUInt32 * x ) {
- x->fetchAndAdd(1); // 1
- cout << "lock b try" << endl;
- while ( 1 ) {
- if ( pthread_rwlock_trywrlock( lk ) == 0 )
- break;
- sleepmillis(10);
- }
- cout << "lock b got" << endl;
- x->fetchAndAdd(1); // 2
- pthread_rwlock_unlock( lk );
+ static void worker1(pthread_rwlock_t* lk, AtomicUInt32* x) {
+ x->fetchAndAdd(1); // 1
+ cout << "lock b try" << endl;
+ while (1) {
+ if (pthread_rwlock_trywrlock(lk) == 0)
+ break;
+ sleepmillis(10);
}
+ cout << "lock b got" << endl;
+ x->fetchAndAdd(1); // 2
+ pthread_rwlock_unlock(lk);
+ }
- static void worker2( pthread_rwlock_t * lk , AtomicUInt32 * x ) {
- cout << "lock c try" << endl;
- pthread_rwlock_rdlock( lk );
- x->fetchAndAdd(1);
- cout << "lock c got" << endl;
- pthread_rwlock_unlock( lk );
- }
+ static void worker2(pthread_rwlock_t* lk, AtomicUInt32* x) {
+ cout << "lock c try" << endl;
+ pthread_rwlock_rdlock(lk);
+ x->fetchAndAdd(1);
+ cout << "lock c got" << endl;
+ pthread_rwlock_unlock(lk);
+ }
#endif
- void run() {
- /**
- * note: this test will deadlock if the code breaks
- */
-
-#if defined(__linux__) || defined(__APPLE__)
-
- // create
- pthread_rwlock_t lk;
- verify( pthread_rwlock_init( &lk , 0 ) == 0 );
-
- // read lock
- verify( pthread_rwlock_rdlock( &lk ) == 0 );
-
- AtomicUInt32 x1(0);
- boost::thread t1( stdx::bind( worker1 , &lk , &x1 ) );
- while ( ! x1.load() );
- verify( x1.load() == 1 );
- sleepmillis( 500 );
- verify( x1.load() == 1 );
-
- AtomicUInt32 x2(0);
-
- boost::thread t2( stdx::bind( worker2, &lk , &x2 ) );
- t2.join();
- verify( x2.load() == 1 );
-
- pthread_rwlock_unlock( &lk );
-
- for ( int i=0; i<2000; i++ ) {
- if ( x1.load() == 2 )
- break;
- sleepmillis(1);
- }
+ void run() {
+/**
+ * note: this test will deadlock if the code breaks
+ */
- verify( x1.load() == 2 );
- t1.join();
-#endif
- }
- };
+#if defined(__linux__) || defined(__APPLE__)
- // we don't use upgrade so that part is not important currently but the other aspects of this test are
- // interesting; it would be nice to do analogous tests for SimpleRWLock and QLock
- class UpgradableTest : public ThreadedTest<7> {
- RWLock m;
- public:
- UpgradableTest() : m("utest") {}
- private:
- virtual void validate() { }
- virtual void subthread(int x) {
- Client::initThread("utest");
-
- /* r = get a read lock
- R = get a read lock and we expect it to be fast
- u = get upgradable
- U = get upgradable and we expect it to be fast
- w = get a write lock
- */
- // /-- verify upgrade can be done instantly while in a read lock already
- // | /-- verify upgrade acquisition isn't greedy
- // | | /-- verify writes aren't greedy while in upgradable (or are they?)
- // v v v
- const char *what = " RURuRwR";
-
- sleepmillis(100*x);
-
- int Z = 1;
- LOG(Z) << x << ' ' << what[x] << " request" << endl;
- char ch = what[x];
- switch( ch ) {
- case 'w':
- {
- m.lock();
- LOG(Z) << x << " w got" << endl;
- sleepmillis(100);
- LOG(Z) << x << " w unlock" << endl;
- m.unlock();
- }
+ // create
+ pthread_rwlock_t lk;
+ verify(pthread_rwlock_init(&lk, 0) == 0);
+
+ // read lock
+ verify(pthread_rwlock_rdlock(&lk) == 0);
+
+ AtomicUInt32 x1(0);
+ boost::thread t1(stdx::bind(worker1, &lk, &x1));
+ while (!x1.load())
+ ;
+ verify(x1.load() == 1);
+ sleepmillis(500);
+ verify(x1.load() == 1);
+
+ AtomicUInt32 x2(0);
+
+ boost::thread t2(stdx::bind(worker2, &lk, &x2));
+ t2.join();
+ verify(x2.load() == 1);
+
+ pthread_rwlock_unlock(&lk);
+
+ for (int i = 0; i < 2000; i++) {
+ if (x1.load() == 2)
break;
+ sleepmillis(1);
+ }
+
+ verify(x1.load() == 2);
+ t1.join();
+#endif
+ }
+};
+
+// We don't use upgrade, so that part is not important currently, but the other aspects of this
+// test are interesting; it would be nice to do analogous tests for SimpleRWLock and QLock.
+class UpgradableTest : public ThreadedTest<7> {
+ RWLock m;
+
+public:
+ UpgradableTest() : m("utest") {}
+
+private:
+ virtual void validate() {}
+ virtual void subthread(int x) {
+ Client::initThread("utest");
+
+ /* r = get a read lock
+ R = get a read lock and we expect it to be fast
+ u = get upgradable
+ U = get upgradable and we expect it to be fast
+ w = get a write lock
+ */
+ // /-- verify upgrade can be done instantly while in a read lock already
+ // | /-- verify upgrade acquisition isn't greedy
+ // | | /-- verify writes aren't greedy while in upgradable (or are they?)
+ // v v v
+ const char* what = " RURuRwR";
+
+ sleepmillis(100 * x);
+
+ int Z = 1;
+ LOG(Z) << x << ' ' << what[x] << " request" << endl;
+ char ch = what[x];
+ switch (ch) {
+ case 'w': {
+ m.lock();
+ LOG(Z) << x << " w got" << endl;
+ sleepmillis(100);
+ LOG(Z) << x << " w unlock" << endl;
+ m.unlock();
+ } break;
case 'u':
- case 'U':
- {
- Timer t;
- RWLock::Upgradable u(m);
- LOG(Z) << x << ' ' << ch << " got" << endl;
- if( ch == 'U' ) {
+ case 'U': {
+ Timer t;
+ RWLock::Upgradable u(m);
+ LOG(Z) << x << ' ' << ch << " got" << endl;
+ if (ch == 'U') {
#if defined(NTDDI_VERSION) && defined(NTDDI_WIN7) && (NTDDI_VERSION >= NTDDI_WIN7)
- // SRW locks are neither fair nor FIFO, as per docs
- if( t.millis() > 2000 ) {
+ // SRW locks are neither fair nor FIFO, as per docs
+ if (t.millis() > 2000) {
#else
- if( t.millis() > 20 ) {
+ if (t.millis() > 20) {
#endif
- DEV {
- // a _DEBUG buildbot might be slow, try to avoid false positives
- mongo::unittest::log() <<
- "warning lock upgrade was slow " << t.millis() << endl;
- }
- else {
- mongo::unittest::log() <<
- "assertion failure: lock upgrade was too slow: " <<
- t.millis() << endl;
- ASSERT( false );
- }
+ DEV {
+ // a _DEBUG buildbot might be slow, try to avoid false positives
+ mongo::unittest::log() << "warning lock upgrade was slow " << t.millis()
+ << endl;
+ }
+ else {
+ mongo::unittest::log()
+ << "assertion failure: lock upgrade was too slow: " << t.millis()
+ << endl;
+ ASSERT(false);
}
}
- sleepsecs(1);
- LOG(Z) << x << ' ' << ch << " unlock" << endl;
}
- break;
+ sleepsecs(1);
+ LOG(Z) << x << ' ' << ch << " unlock" << endl;
+ } break;
case 'r':
- case 'R':
- {
- Timer t;
- m.lock_shared();
- LOG(Z) << x << ' ' << ch << " got " << endl;
- if( what[x] == 'R' ) {
- if( t.millis() > 15 ) {
- // commented out for less chatter, we aren't using upgradeable anyway right now:
- // log() << x << " info: when in upgradable, write locks are still greedy on this platform" << endl;
- }
+ case 'R': {
+ Timer t;
+ m.lock_shared();
+ LOG(Z) << x << ' ' << ch << " got " << endl;
+ if (what[x] == 'R') {
+ if (t.millis() > 15) {
+                        // commented out for less chatter; we aren't using upgradable anyway right now:
+ // log() << x << " info: when in upgradable, write locks are still greedy on this platform" << endl;
}
- sleepmillis(200);
- LOG(Z) << x << ' ' << ch << " unlock" << endl;
- m.unlock_shared();
}
- break;
+ sleepmillis(200);
+ LOG(Z) << x << ' ' << ch << " unlock" << endl;
+ m.unlock_shared();
+ } break;
default:
ASSERT(false);
- }
-
- cc().shutdown();
}
- };
- void sleepalittle() {
- Timer t;
- while( 1 ) {
- boost::this_thread::yield();
- if( t.micros() > 8 )
- break;
- }
+ cc().shutdown();
}
+};
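
The upgradable state exercised above has no direct standard-library equivalent; Boost's
shared_mutex models it, and a sketch of the promotion idiom (for illustration only; the backend
behind the RWLock wrapper varies by platform) looks like:

    #include <boost/thread.hpp>

    void promoteIfNeeded(boost::shared_mutex& m) {
        boost::upgrade_lock<boost::shared_mutex> readable(m);  // coexists with shared readers
        // ... inspect state; decide a write is really needed ...
        boost::upgrade_to_unique_lock<boost::shared_mutex> writable(readable);  // atomic promotion
        // exclusive access here; locks release in reverse order on scope exit
    }

Only one thread may hold the upgrade lock at a time, which is why the 'u' and 'U' cases above can
expect promotion to complete quickly once readers drain.
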
+
+void sleepalittle() {
+ Timer t;
+ while (1) {
+ boost::this_thread::yield();
+ if (t.micros() > 8)
+ break;
+ }
+}
- int once;
+int once;
+
+/* This test is to see how long it takes to get a lock after there has been contention -- the OS
+   will need to reschedule us. If it were a spinlock it would be fast of course, but these aren't
+   spin locks. Experimenting with different numbers of threads would be a good idea.
+*/
+template <class whichmutex, class scoped>
+class Slack : public ThreadedTest<17> {
+public:
+ Slack() : m("slack") {
+ k = 0;
+ done = false;
+ a = b = 0;
+ locks = 0;
+ }
- /* This test is to see how long it takes to get a lock after there has been contention -- the OS
- will need to reschedule us. if a spinlock, it will be fast of course, but these aren't spin locks.
- Experimenting with different # of threads would be a good idea.
- */
- template <class whichmutex, class scoped>
- class Slack : public ThreadedTest<17> {
- public:
- Slack() : m("slack") {
- k = 0;
- done = false;
- a = b = 0;
- locks = 0;
- }
- private:
- whichmutex m;
- char pad1[128];
- unsigned a, b;
- char pad2[128];
- unsigned locks;
- char pad3[128];
- volatile int k;
-
- virtual void validate() {
- if( once++ == 0 ) {
- // <= 1.35 we use a different rwmutex impl so worth noting
- cout << "Boost version : " << BOOST_VERSION << endl;
+private:
+ whichmutex m;
+ char pad1[128];
+ unsigned a, b;
+ char pad2[128];
+ unsigned locks;
+ char pad3[128];
+ volatile int k;
+
+ virtual void validate() {
+ if (once++ == 0) {
+ // <= 1.35 we use a different rwmutex impl so worth noting
+ cout << "Boost version : " << BOOST_VERSION << endl;
+ }
+ cout << typeid(whichmutex).name() << " Slack useful work fraction: " << ((double)a) / b
+ << " locks:" << locks << endl;
+ }
+ void watch() {
+ while (1) {
+ b++;
+ //__sync_synchronize();
+ if (k) {
+ a++;
}
- cout << typeid(whichmutex).name() <<
- " Slack useful work fraction: " << ((double)a)/b << " locks:" << locks << endl;
+ sleepmillis(0);
+ if (done)
+ break;
}
- void watch() {
- while( 1 ) {
- b++;
- //__sync_synchronize();
- if( k ) {
- a++;
- }
- sleepmillis(0);
- if( done )
- break;
- }
+ }
+ volatile bool done;
+ virtual void subthread(int x) {
+ if (x == 1) {
+ watch();
+ return;
}
- volatile bool done;
- virtual void subthread(int x) {
- if( x == 1 ) {
- watch();
- return;
- }
- Timer t;
- unsigned lks = 0;
- while( 1 ) {
- scoped lk(m);
- k = 1;
- // not very long, we'd like to simulate about 100K locks per second
- sleepalittle();
- lks++;
- if( done || t.millis() > 1500 ) {
- locks += lks;
- k = 0;
- break;
- }
+ Timer t;
+ unsigned lks = 0;
+ while (1) {
+ scoped lk(m);
+ k = 1;
+ // not very long, we'd like to simulate about 100K locks per second
+ sleepalittle();
+ lks++;
+ if (done || t.millis() > 1500) {
+ locks += lks;
k = 0;
- //__sync_synchronize();
+ break;
}
- done = true;
+ k = 0;
+ //__sync_synchronize();
}
- };
+ done = true;
+ }
+};
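
The idea behind the Slack measurement is that a watcher samples a flag that is only set while the
lock is held, so held/samples approximates the fraction of wall time the lock spends doing useful
work. A simplified single-worker sketch of the same measurement (names hypothetical; the real test
runs 16 competing lockers, so its ratio also captures handoff slack):

    #include <atomic>
    #include <chrono>
    #include <iostream>
    #include <mutex>
    #include <thread>

    int main() {
        std::mutex m;
        std::atomic<int> k{0};  // nonzero while the critical section is "working"
        std::atomic<bool> done{false};
        unsigned held = 0, samples = 0;  // written by the watcher thread only

        std::thread watcher([&] {
            while (!done) {
                ++samples;
                if (k)
                    ++held;
                std::this_thread::yield();
            }
        });

        std::thread worker([&] {
            auto end = std::chrono::steady_clock::now() + std::chrono::milliseconds(200);
            while (std::chrono::steady_clock::now() < end) {
                std::lock_guard<std::mutex> lk(m);
                k = 1;  // simulate a short critical section
                k = 0;
            }
            done = true;
        });

        worker.join();
        watcher.join();
        std::cout << "useful work fraction: " << double(held) / samples << "\n";
    }
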
- class CondSlack : public ThreadedTest<17> {
- Notification n;
- public:
- CondSlack() {
- k = 0;
- done = false;
- a = b = 0;
- locks = 0;
+class CondSlack : public ThreadedTest<17> {
+ Notification n;
+
+public:
+ CondSlack() {
+ k = 0;
+ done = false;
+ a = b = 0;
+ locks = 0;
+ }
+
+private:
+ unsigned a, b;
+ virtual void validate() {
+ cout << "CondSlack useful work fraction: " << ((double)a) / b << " locks:" << locks << endl;
+ }
+ unsigned locks;
+ volatile int k;
+ void watch() {
+ while (1) {
+ b++;
+ if (k) {
+ a++;
+ }
+ sleepmillis(0);
+ if (done)
+ break;
}
- private:
- unsigned a, b;
- virtual void validate() {
- cout << "CondSlack useful work fraction: " << ((double)a)/b << " locks:" << locks << endl;
+ }
+ volatile bool done;
+ virtual void subthread(int x) {
+ if (x == 1) {
+ n.notifyOne();
+ watch();
+ return;
}
- unsigned locks;
- volatile int k;
- void watch() {
- while( 1 ) {
- b++;
- if( k ) {
- a++;
- }
- sleepmillis(0);
- if( done )
- break;
- }
+ Timer t;
+ while (1) {
+ n.waitToBeNotified();
+ verify(k == 0);
+ k = 1;
+ // not very long, we'd like to simulate about 100K locks per second
+ sleepalittle();
+ k = 0;
+ locks++;
+ n.notifyOne();
+ if (done || t.millis() > 1500)
+ break;
}
- volatile bool done;
- virtual void subthread(int x) {
- if( x == 1 ) {
- n.notifyOne();
- watch();
- return;
- }
+ done = true;
+ }
+};
+
+const int WriteLocksAreGreedy_ThreadCount = 3;
+class WriteLocksAreGreedy : public ThreadedTest<WriteLocksAreGreedy_ThreadCount> {
+public:
+ WriteLocksAreGreedy() : m("gtest"), _barrier(WriteLocksAreGreedy_ThreadCount) {}
+
+private:
+ RWLock m;
+ boost::barrier _barrier;
+ virtual void validate() {}
+ virtual void subthread(int x) {
+ _barrier.wait();
+ int Z = 0;
+ Client::initThread("utest");
+ if (x == 1) {
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1" << endl;
+ rwlock_shared lk(m);
+ sleepmillis(400);
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
+ }
+ if (x == 2) {
+ sleepmillis(100);
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2" << endl;
+ rwlock lk(m, true);
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
+ }
+ if (x == 3) {
+ sleepmillis(200);
Timer t;
- while( 1 ) {
- n.waitToBeNotified();
- verify( k == 0 );
- k = 1;
- // not very long, we'd like to simulate about 100K locks per second
- sleepalittle();
- k = 0;
- locks++;
- n.notifyOne();
- if( done || t.millis() > 1500 )
- break;
- }
- done = true;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3" << endl;
+ rwlock_shared lk(m);
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
+ LOG(Z) << t.millis() << endl;
+ ASSERT(t.millis() > 50);
}
- };
-
- const int WriteLocksAreGreedy_ThreadCount = 3;
- class WriteLocksAreGreedy : public ThreadedTest<WriteLocksAreGreedy_ThreadCount> {
- public:
- WriteLocksAreGreedy() : m("gtest"), _barrier(WriteLocksAreGreedy_ThreadCount) {}
- private:
- RWLock m;
- boost::barrier _barrier;
- virtual void validate() { }
- virtual void subthread(int x) {
- _barrier.wait();
- int Z = 0;
- Client::initThread("utest");
- if( x == 1 ) {
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1" << endl;
- rwlock_shared lk(m);
- sleepmillis(400);
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
- }
- if( x == 2 ) {
- sleepmillis(100);
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2" << endl;
- rwlock lk(m, true);
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
- }
- if( x == 3 ) {
- sleepmillis(200);
- Timer t;
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3" << endl;
- rwlock_shared lk(m);
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
- LOG(Z) << t.millis() << endl;
- ASSERT( t.millis() > 50 );
- }
- cc().shutdown();
- }
- };
+ cc().shutdown();
+ }
+};
- // Tests waiting on the TicketHolder by running many more threads than can fit into the "hotel", but only
- // max _nRooms threads should ever get in at once
- class TicketHolderWaits : public ThreadedTest<10> {
+// Tests waiting on the TicketHolder by running many more threads than can fit into the "hotel";
+// at most _nRooms threads should ever get in at once.
+class TicketHolderWaits : public ThreadedTest<10> {
+ static const int checkIns = 1000;
+ static const int rooms = 3;
- static const int checkIns = 1000;
- static const int rooms = 3;
+public:
+ TicketHolderWaits() : _hotel(rooms), _tickets(_hotel._nRooms) {}
+private:
+ class Hotel {
public:
- TicketHolderWaits() : _hotel( rooms ), _tickets( _hotel._nRooms ) {}
-
- private:
-
- class Hotel {
- public:
- Hotel( int nRooms ) : _frontDesk( "frontDesk" ), _nRooms( nRooms ), _checkedIn( 0 ), _maxRooms( 0 ) {}
-
- void checkIn(){
- scoped_lock lk( _frontDesk );
- _checkedIn++;
- verify( _checkedIn <= _nRooms );
- if( _checkedIn > _maxRooms ) _maxRooms = _checkedIn;
- }
-
- void checkOut(){
- scoped_lock lk( _frontDesk );
- _checkedIn--;
- verify( _checkedIn >= 0 );
- }
-
- mongo::mutex _frontDesk;
- int _nRooms;
- int _checkedIn;
- int _maxRooms;
- };
+ Hotel(int nRooms) : _frontDesk("frontDesk"), _nRooms(nRooms), _checkedIn(0), _maxRooms(0) {}
- Hotel _hotel;
- TicketHolder _tickets;
-
- virtual void subthread(int x) {
-
- string threadName = ( str::stream() << "ticketHolder" << x );
- Client::initThread( threadName.c_str() );
+ void checkIn() {
+ scoped_lock lk(_frontDesk);
+ _checkedIn++;
+ verify(_checkedIn <= _nRooms);
+ if (_checkedIn > _maxRooms)
+ _maxRooms = _checkedIn;
+ }
- for( int i = 0; i < checkIns; i++ ){
+ void checkOut() {
+ scoped_lock lk(_frontDesk);
+ _checkedIn--;
+ verify(_checkedIn >= 0);
+ }
- _tickets.waitForTicket();
- TicketHolderReleaser whenDone( &_tickets );
+ mongo::mutex _frontDesk;
+ int _nRooms;
+ int _checkedIn;
+ int _maxRooms;
+ };
- _hotel.checkIn();
+ Hotel _hotel;
+ TicketHolder _tickets;
- sleepalittle();
- if( i == checkIns - 1 ) sleepsecs( 2 );
+ virtual void subthread(int x) {
+ string threadName = (str::stream() << "ticketHolder" << x);
+ Client::initThread(threadName.c_str());
- _hotel.checkOut();
+ for (int i = 0; i < checkIns; i++) {
+ _tickets.waitForTicket();
+ TicketHolderReleaser whenDone(&_tickets);
- if( ( i % ( checkIns / 10 ) ) == 0 )
- mongo::unittest::log() << "checked in " << i << " times..." << endl;
+ _hotel.checkIn();
- }
+ sleepalittle();
+ if (i == checkIns - 1)
+ sleepsecs(2);
- cc().shutdown();
+ _hotel.checkOut();
+ if ((i % (checkIns / 10)) == 0)
+ mongo::unittest::log() << "checked in " << i << " times..." << endl;
}
- virtual void validate() {
-
- // This should always be true, assuming that it takes < 1 sec for the hardware to process a check-out/check-in
- // Time for test is then ~ #threads / _nRooms * 2 seconds
- verify( _hotel._maxRooms == _hotel._nRooms );
-
- }
+ cc().shutdown();
+ }
- };
+ virtual void validate() {
+        // This should always be true, assuming it takes < 1 sec for the hardware to process a
+        // check-out/check-in. Total test time is then ~ (#threads / _nRooms) * 2 seconds.
+ verify(_hotel._maxRooms == _hotel._nRooms);
+ }
+};
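
TicketHolder behaves like a counting semaphore: waitForTicket() blocks while all _nRooms tickets
are out, and TicketHolderReleaser returns the ticket on scope exit. The invariant validate()
checks can be reproduced with C++20's std::counting_semaphore; a standalone sketch (an analogue,
not the MongoDB class):

    #include <atomic>
    #include <cassert>
    #include <semaphore>
    #include <thread>
    #include <vector>

    int main() {
        constexpr int rooms = 3;
        std::counting_semaphore<rooms> tickets(rooms);
        std::atomic<int> checkedIn{0}, maxSeen{0};

        std::vector<std::thread> guests;
        for (int t = 0; t < 10; ++t) {
            guests.emplace_back([&] {
                for (int i = 0; i < 100; ++i) {
                    tickets.acquire();  // waitForTicket(): blocks while all rooms are taken
                    int now = ++checkedIn;
                    int prev = maxSeen.load();
                    while (now > prev && !maxSeen.compare_exchange_weak(prev, now)) {
                    }
                    --checkedIn;
                    tickets.release();  // TicketHolderReleaser equivalent
                }
            });
        }
        for (auto& g : guests)
            g.join();
        assert(maxSeen.load() <= rooms);  // never more guests than rooms
    }
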
- class All : public Suite {
- public:
- All() : Suite( "threading" ) { }
+class All : public Suite {
+public:
+ All() : Suite("threading") {}
- void setupTests() {
- add< WriteLocksAreGreedy >();
+ void setupTests() {
+ add<WriteLocksAreGreedy>();
- // Slack is a test to see how long it takes for another thread to pick up
- // and begin work after another relinquishes the lock. e.g. a spin lock
- // would have very little slack.
- add< Slack<mongo::mutex , mongo::mutex::scoped_lock > >();
- add< Slack<SimpleMutex,SimpleMutex::scoped_lock> >();
- add< Slack<SimpleRWLock,SimpleRWLock::Exclusive> >();
- add< CondSlack >();
+ // Slack is a test to see how long it takes for another thread to pick up
+ // and begin work after another relinquishes the lock. e.g. a spin lock
+ // would have very little slack.
+ add<Slack<mongo::mutex, mongo::mutex::scoped_lock>>();
+ add<Slack<SimpleMutex, SimpleMutex::scoped_lock>>();
+ add<Slack<SimpleRWLock, SimpleRWLock::Exclusive>>();
+ add<CondSlack>();
- add< UpgradableTest >();
+ add<UpgradableTest>();
- add< IsAtomicWordAtomic<AtomicUInt32> >();
- add< IsAtomicWordAtomic<AtomicUInt64> >();
- add< MVarTest >();
- add< ThreadPoolTest >();
+ add<IsAtomicWordAtomic<AtomicUInt32>>();
+ add<IsAtomicWordAtomic<AtomicUInt64>>();
+ add<MVarTest>();
+ add<ThreadPoolTest>();
- add< RWLockTest1 >();
- add< RWLockTest2 >();
- add< RWLockTest3 >();
- add< RWLockTest4 >();
+ add<RWLockTest1>();
+ add<RWLockTest2>();
+ add<RWLockTest3>();
+ add<RWLockTest4>();
- add< MongoMutexTest >();
- add< TicketHolderWaits >();
- }
- };
+ add<MongoMutexTest>();
+ add<TicketHolderWaits>();
+ }
+};
- SuiteInstance<All> myall;
+SuiteInstance<All> myall;
}
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index 4d2f527c54a..78606c784a4 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -45,2002 +45,2069 @@
namespace UpdateTests {
- using std::auto_ptr;
- using std::numeric_limits;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- class ClientBase {
- public:
- ClientBase() : _client(&_txn) {
- _prevError = mongo::lastError._get( false );
- mongo::lastError.release();
- mongo::lastError.reset( new LastError() );
- }
- virtual ~ClientBase() {
- mongo::lastError.reset( _prevError );
- }
-
- protected:
- void insert( const char *ns, BSONObj o ) {
- _client.insert( ns, o );
- }
- void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
- _client.update( ns, Query( q ), o, upsert );
- }
- bool error() {
- return !_client.getPrevError().getField( "err" ).isNull();
- }
-
- OperationContextImpl _txn;
- DBDirectClient _client;
-
- private:
- LastError* _prevError;
- };
-
- class Fail : public ClientBase {
- public:
- virtual ~Fail() {}
- void run() {
- prep();
- ASSERT( !error() );
- doIt();
- ASSERT( error() );
- }
- protected:
- const char *ns() { return "unittests.UpdateTests_Fail"; }
- virtual void prep() {
- insert( ns(), fromjson( "{a:1}" ) );
- }
- virtual void doIt() = 0;
- };
-
- class ModId : public Fail {
- void doIt() {
- update( ns(), BSONObj(), fromjson( "{$set:{'_id':4}}" ) );
- }
- };
-
- class ModNonmodMix : public Fail {
- void doIt() {
- update( ns(), BSONObj(), fromjson( "{$set:{a:4},z:3}" ) );
- }
- };
-
- class InvalidMod : public Fail {
- void doIt() {
- update( ns(), BSONObj(), fromjson( "{$awk:{a:4}}" ) );
- }
- };
-
- class ModNotFirst : public Fail {
- void doIt() {
- update( ns(), BSONObj(), fromjson( "{z:3,$set:{a:4}}" ) );
- }
- };
-
- class ModDuplicateFieldSpec : public Fail {
- void doIt() {
- update( ns(), BSONObj(), fromjson( "{$set:{a:4},$inc:{a:1}}" ) );
- }
- };
-
- class IncNonNumber : public Fail {
- void doIt() {
- update( ns(), BSONObj(), fromjson( "{$inc:{a:'d'}}" ) );
- }
- };
-
- class PushAllNonArray : public Fail {
- void doIt() {
- insert( ns(), fromjson( "{a:[1]}" ) );
- update( ns(), BSONObj(), fromjson( "{$pushAll:{a:'d'}}" ) );
- }
- };
-
- class PullAllNonArray : public Fail {
- void doIt() {
- insert( ns(), fromjson( "{a:[1]}" ) );
- update( ns(), BSONObj(), fromjson( "{$pullAll:{a:'d'}}" ) );
- }
- };
-
- class IncTargetNonNumber : public Fail {
- void doIt() {
- insert( ns(), BSON( "a" << "a" ) );
- update( ns(), BSON( "a" << "a" ), fromjson( "{$inc:{a:1}}" ) );
- }
- };
-
- class SetBase : public ClientBase {
- public:
- ~SetBase() {
- _client.dropCollection( ns() );
- }
- protected:
- const char *ns() { return "unittests.updatetests.SetBase"; }
- };
-
- class SetNum : public SetBase {
- public:
- void run() {
- _client.insert( ns(), BSON( "a" << 1 ) );
- _client.update( ns(), BSON( "a" << 1 ), BSON( "$set" << BSON( "a" << 4 ) ) );
- ASSERT( !_client.findOne( ns(), BSON( "a" << 4 ) ).isEmpty() );
- }
- };
-
- class SetString : public SetBase {
- public:
- void run() {
- _client.insert( ns(), BSON( "a" << "b" ) );
- _client.update( ns(), BSON( "a" << "b" ), BSON( "$set" << BSON( "a" << "c" ) ) );
- ASSERT( !_client.findOne( ns(), BSON( "a" << "c" ) ).isEmpty() );
- }
- };
-
- class SetStringDifferentLength : public SetBase {
- public:
- void run() {
- _client.insert( ns(), BSON( "a" << "b" ) );
- _client.update( ns(), BSON( "a" << "b" ), BSON( "$set" << BSON( "a" << "cd" ) ) );
- ASSERT( !_client.findOne( ns(), BSON( "a" << "cd" ) ).isEmpty() );
- }
- };
-
- class SetStringToNum : public SetBase {
- public:
- void run() {
- _client.insert( ns(), BSON( "a" << "b" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a" << 5 ) ) );
- ASSERT( !_client.findOne( ns(), BSON( "a" << 5 ) ).isEmpty() );
- }
- };
-
- class SetStringToNumInPlace : public SetBase {
- public:
- void run() {
- _client.insert( ns(), BSON( "a" << "bcd" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a" << 5.0 ) ) );
- ASSERT( !_client.findOne( ns(), BSON( "a" << 5.0 ) ).isEmpty() );
- }
- };
-
- class SetOnInsertFromEmpty : public SetBase {
- public:
- void run() {
- // Try with upsert false first.
- _client.insert( ns(), BSONObj() /* empty document */);
- _client.update( ns(), Query(), BSON( "$setOnInsert" << BSON( "a" << 1 ) ), false );
- ASSERT( _client.findOne( ns(), BSON( "a" << 1 ) ).isEmpty() );
-
- // Then with upsert true.
- _client.update( ns(), Query(), BSON( "$setOnInsert" << BSON( "a" << 1 ) ), true );
- ASSERT( _client.findOne( ns(), BSON( "a" << 1 ) ).isEmpty() );
-
- }
- };
-
- class SetOnInsertFromNonExistent : public SetBase {
- public:
- void run() {
- // Try with upsert false first.
- _client.update( ns(), Query(), BSON( "$setOnInsert" << BSON( "a" << 1 ) ), false );
- ASSERT( _client.findOne( ns(), BSON( "a" << 1 ) ).isEmpty() );
-
- // Then with upsert true.
- _client.update( ns(), Query(), BSON( "$setOnInsert" << BSON( "a" << 1 ) ), true );
- ASSERT( !_client.findOne( ns(), BSON( "a" << 1 ) ).isEmpty() );
-
- }
- };
-
- class SetOnInsertFromNonExistentWithQuery : public SetBase {
- public:
- void run() {
- Query q("{a:1}");
-
- // Try with upsert false first.
- _client.update( ns(), q, BSON( "$setOnInsert" << BSON( "b" << 1 ) ), false );
- ASSERT( _client.findOne( ns(), BSON( "a" << 1 ) ).isEmpty() );
-
- // Then with upsert true.
- _client.update( ns(), q, BSON( "$setOnInsert" << BSON( "b" << 1 ) ), true );
- ASSERT( !_client.findOne( ns(), BSON( "a" << 1 << "b" << 1) ).isEmpty() );
-
- }
- };
-
- class SetOnInsertFromNonExistentWithQueryOverField : public SetBase {
- public:
- void run() {
- Query q("{a:1}"); // same field that we'll setOnInsert on
-
- // Try with upsert false first.
- _client.update( ns(), q, BSON( "$setOnInsert" << BSON( "a" << 2 ) ), false );
- ASSERT( _client.findOne( ns(), BSON( "a" << 1 ) ).isEmpty() );
-
- // Then with upsert true.
- _client.update( ns(), q, BSON( "$setOnInsert" << BSON( "a" << 2 ) ), true );
- ASSERT( !_client.findOne( ns(), BSON( "a" << 2 ) ).isEmpty() );
-
- }
- };
-
- class SetOnInsertMissingField : public SetBase {
- public:
- void run() {
- BSONObj res = fromjson("{'_id':0, a:1}");
- _client.insert( ns(), res );
- _client.update( ns(), Query(), BSON( "$setOnInsert" << BSON( "b" << 1 ) ) );
- ASSERT( _client.findOne( ns(), BSON( "a" << 1 ) ).woCompare( res ) == 0 );
- }
- };
-
- class SetOnInsertExisting : public SetBase {
- public:
- void run() {
- _client.insert( ns(), BSON( "a" << 1 ) );
- _client.update( ns(), Query(), BSON( "$setOnInsert" << BSON( "a" << 2 ) ) );
- ASSERT( !_client.findOne( ns(), BSON( "a" << 1 ) ).isEmpty() );
- }
- };
-
- class SetOnInsertMixed : public SetBase {
- public:
- void run() {
- // Try with upsert false first.
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a" << 1 ) <<
- "$setOnInsert" << BSON( "b" << 2 ) ), false );
- ASSERT( _client.findOne( ns(), BSON( "a" << 1 << "b" << 2 ) ).isEmpty() );
-
- // Then with upsert true.
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a" << 1 ) <<
- "$setOnInsert" << BSON( "b" << 2 ) ), true );
- ASSERT( !_client.findOne( ns(), BSON( "a" << 1 << "b" << 2 ) ).isEmpty() );
- }
- };
-
- class SetOnInsertMissingParent : public SetBase {
- public:
- void run() {
- // In a mod that uses dontApply, we should be careful not to create a
- // parent unneccesarily.
- BSONObj initial = fromjson( "{'_id':0}" );
- BSONObj final = fromjson( "{'_id':0, d:1}" );
- _client.insert( ns(), initial );
- _client.update( ns(), initial, BSON( "$setOnInsert" << BSON( "a.b" << 1 ) <<
- "$set" << BSON( "d" << 1 ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), initial ), final );
- }
- };
-
- class ModDotted : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{a:{b:4}}" ) );
- _client.update( ns(), Query(), BSON( "$inc" << BSON( "a.b" << 10 ) ) );
- ASSERT( !_client.findOne( ns(), BSON( "a.b" << 14 ) ).isEmpty() );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a.b" << 55 ) ) );
- ASSERT( !_client.findOne( ns(), BSON( "a.b" << 55 ) ).isEmpty() );
- }
- };
-
- class SetInPlaceDotted : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{a:{b:'cdef'}}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a.b" << "llll" ) ) );
- ASSERT( !_client.findOne( ns(), BSON( "a.b" << "llll" ) ).isEmpty() );
- }
- };
-
- class SetRecreateDotted : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:{b:'cdef'}}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a.b" << "lllll" ) ) );
- ASSERT( _client.findOne( ns(), BSON( "a.b" << "lllll" ) ).woCompare( fromjson( "{'_id':0,a:{b:'lllll'}}" ) ) == 0 );
- }
- };
-
- class SetMissingDotted : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- _client.update( ns(), BSONObj(), BSON( "$set" << BSON( "a.b" << "lllll" ) ) );
- ASSERT( _client.findOne( ns(), BSON( "a.b" << "lllll" ) ).woCompare( fromjson( "{'_id':0,a:{b:'lllll'}}" ) ) == 0 );
- }
- };
-
- class SetAdjacentDotted : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:{c:4}}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a.b" << "lllll" ) ) );
- ASSERT_EQUALS(
- mutablebson::unordered( _client.findOne( ns(), BSON( "a.b" << "lllll" ) ) ),
- mutablebson::unordered( fromjson( "{'_id':0,a:{b:'lllll',c:4}}" ) ) );
- }
- };
-
- class IncMissing : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- _client.update( ns(), Query(), BSON( "$inc" << BSON( "f" << 3.0 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,f:3}" ) ) == 0 );
- }
- };
-
- class MultiInc : public SetBase {
- public:
-
- string s() {
- stringstream ss;
- auto_ptr<DBClientCursor> cc = _client.query( ns() , Query().sort( BSON( "_id" << 1 ) ) );
- bool first = true;
- while ( cc->more() ) {
- if ( first ) first = false;
- else ss << ",";
-
- BSONObj o = cc->next();
- ss << o["x"].numberInt();
- }
- return ss.str();
- }
-
- void run() {
- _client.insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
- _client.insert( ns(), BSON( "_id" << 2 << "x" << 5 ) );
-
- ASSERT_EQUALS( "1,5" , s() );
-
- _client.update( ns() , BSON( "_id" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
- ASSERT_EQUALS( "2,5" , s() );
-
- _client.update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) );
- ASSERT_EQUALS( "3,5" , s() );
+using std::auto_ptr;
+using std::numeric_limits;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+class ClientBase {
+public:
+ ClientBase() : _client(&_txn) {
+ _prevError = mongo::lastError._get(false);
+ mongo::lastError.release();
+ mongo::lastError.reset(new LastError());
+ }
+ virtual ~ClientBase() {
+ mongo::lastError.reset(_prevError);
+ }
+
+protected:
+ void insert(const char* ns, BSONObj o) {
+ _client.insert(ns, o);
+ }
+    void update(const char* ns, BSONObj q, BSONObj o, bool upsert = false) {
+ _client.update(ns, Query(q), o, upsert);
+ }
+ bool error() {
+ return !_client.getPrevError().getField("err").isNull();
+ }
+
+ OperationContextImpl _txn;
+ DBDirectClient _client;
+
+private:
+ LastError* _prevError;
+};
+
+class Fail : public ClientBase {
+public:
+ virtual ~Fail() {}
+ void run() {
+ prep();
+ ASSERT(!error());
+ doIt();
+ ASSERT(error());
+ }
+
+protected:
+ const char* ns() {
+ return "unittests.UpdateTests_Fail";
+ }
+ virtual void prep() {
+ insert(ns(), fromjson("{a:1}"));
+ }
+ virtual void doIt() = 0;
+};
+
+class ModId : public Fail {
+ void doIt() {
+ update(ns(), BSONObj(), fromjson("{$set:{'_id':4}}"));
+ }
+};
+
+class ModNonmodMix : public Fail {
+ void doIt() {
+ update(ns(), BSONObj(), fromjson("{$set:{a:4},z:3}"));
+ }
+};
+
+class InvalidMod : public Fail {
+ void doIt() {
+ update(ns(), BSONObj(), fromjson("{$awk:{a:4}}"));
+ }
+};
+
+class ModNotFirst : public Fail {
+ void doIt() {
+ update(ns(), BSONObj(), fromjson("{z:3,$set:{a:4}}"));
+ }
+};
+
+class ModDuplicateFieldSpec : public Fail {
+ void doIt() {
+ update(ns(), BSONObj(), fromjson("{$set:{a:4},$inc:{a:1}}"));
+ }
+};
+
+class IncNonNumber : public Fail {
+ void doIt() {
+ update(ns(), BSONObj(), fromjson("{$inc:{a:'d'}}"));
+ }
+};
+
+class PushAllNonArray : public Fail {
+ void doIt() {
+ insert(ns(), fromjson("{a:[1]}"));
+ update(ns(), BSONObj(), fromjson("{$pushAll:{a:'d'}}"));
+ }
+};
+
+class PullAllNonArray : public Fail {
+ void doIt() {
+ insert(ns(), fromjson("{a:[1]}"));
+ update(ns(), BSONObj(), fromjson("{$pullAll:{a:'d'}}"));
+ }
+};
+
+class IncTargetNonNumber : public Fail {
+ void doIt() {
+ insert(ns(),
+ BSON("a"
+ << "a"));
+ update(ns(),
+ BSON("a"
+ << "a"),
+ fromjson("{$inc:{a:1}}"));
+ }
+};
+
+class SetBase : public ClientBase {
+public:
+ ~SetBase() {
+ _client.dropCollection(ns());
+ }
+
+protected:
+ const char* ns() {
+ return "unittests.updatetests.SetBase";
+ }
+};
+
+class SetNum : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), BSON("a" << 1));
+ _client.update(ns(), BSON("a" << 1), BSON("$set" << BSON("a" << 4)));
+ ASSERT(!_client.findOne(ns(), BSON("a" << 4)).isEmpty());
+ }
+};
+
+class SetString : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(),
+ BSON("a"
+ << "b"));
+ _client.update(ns(),
+ BSON("a"
+ << "b"),
+ BSON("$set" << BSON("a"
+ << "c")));
+ ASSERT(!_client.findOne(ns(),
+ BSON("a"
+ << "c")).isEmpty());
+ }
+};
+
+class SetStringDifferentLength : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(),
+ BSON("a"
+ << "b"));
+ _client.update(ns(),
+ BSON("a"
+ << "b"),
+ BSON("$set" << BSON("a"
+ << "cd")));
+ ASSERT(!_client.findOne(ns(),
+ BSON("a"
+ << "cd")).isEmpty());
+ }
+};
+
+class SetStringToNum : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(),
+ BSON("a"
+ << "b"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a" << 5)));
+ ASSERT(!_client.findOne(ns(), BSON("a" << 5)).isEmpty());
+ }
+};
+
+class SetStringToNumInPlace : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(),
+ BSON("a"
+ << "bcd"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a" << 5.0)));
+ ASSERT(!_client.findOne(ns(), BSON("a" << 5.0)).isEmpty());
+ }
+};
+
+class SetOnInsertFromEmpty : public SetBase {
+public:
+ void run() {
+ // Try with upsert false first.
+ _client.insert(ns(), BSONObj() /* empty document */);
+ _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 1)), false);
+ ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+
+ // Then with upsert true.
+ _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 1)), true);
+ ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ }
+};
+
+class SetOnInsertFromNonExistent : public SetBase {
+public:
+ void run() {
+ // Try with upsert false first.
+ _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 1)), false);
+ ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+
+ // Then with upsert true.
+ _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 1)), true);
+ ASSERT(!_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ }
+};
+
+class SetOnInsertFromNonExistentWithQuery : public SetBase {
+public:
+ void run() {
+ Query q("{a:1}");
+
+ // Try with upsert false first.
+ _client.update(ns(), q, BSON("$setOnInsert" << BSON("b" << 1)), false);
+ ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+
+ // Then with upsert true.
+ _client.update(ns(), q, BSON("$setOnInsert" << BSON("b" << 1)), true);
+ ASSERT(!_client.findOne(ns(), BSON("a" << 1 << "b" << 1)).isEmpty());
+ }
+};
+
+class SetOnInsertFromNonExistentWithQueryOverField : public SetBase {
+public:
+ void run() {
+ Query q("{a:1}"); // same field that we'll setOnInsert on
+
+ // Try with upsert false first.
+ _client.update(ns(), q, BSON("$setOnInsert" << BSON("a" << 2)), false);
+ ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+
+ // Then with upsert true.
+ _client.update(ns(), q, BSON("$setOnInsert" << BSON("a" << 2)), true);
+ ASSERT(!_client.findOne(ns(), BSON("a" << 2)).isEmpty());
+ }
+};
+
+class SetOnInsertMissingField : public SetBase {
+public:
+ void run() {
+ BSONObj res = fromjson("{'_id':0, a:1}");
+ _client.insert(ns(), res);
+ _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("b" << 1)));
+ ASSERT(_client.findOne(ns(), BSON("a" << 1)).woCompare(res) == 0);
+ }
+};
+
+class SetOnInsertExisting : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), BSON("a" << 1));
+ _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 2)));
+ ASSERT(!_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ }
+};
+
+class SetOnInsertMixed : public SetBase {
+public:
+ void run() {
+ // Try with upsert false first.
+ _client.update(ns(),
+ Query(),
+ BSON("$set" << BSON("a" << 1) << "$setOnInsert" << BSON("b" << 2)),
+ false);
+ ASSERT(_client.findOne(ns(), BSON("a" << 1 << "b" << 2)).isEmpty());
+
+ // Then with upsert true.
+ _client.update(ns(),
+ Query(),
+ BSON("$set" << BSON("a" << 1) << "$setOnInsert" << BSON("b" << 2)),
+ true);
+ ASSERT(!_client.findOne(ns(), BSON("a" << 1 << "b" << 2)).isEmpty());
+ }
+};
+
+class SetOnInsertMissingParent : public SetBase {
+public:
+ void run() {
+ // In a mod that uses dontApply, we should be careful not to create a
+        // parent unnecessarily.
+ BSONObj initial = fromjson("{'_id':0}");
+ BSONObj final = fromjson("{'_id':0, d:1}");
+ _client.insert(ns(), initial);
+ _client.update(
+ ns(), initial, BSON("$setOnInsert" << BSON("a.b" << 1) << "$set" << BSON("d" << 1)));
+ ASSERT_EQUALS(_client.findOne(ns(), initial), final);
+ }
+};
+
+class ModDotted : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{a:{b:4}}"));
+ _client.update(ns(), Query(), BSON("$inc" << BSON("a.b" << 10)));
+ ASSERT(!_client.findOne(ns(), BSON("a.b" << 14)).isEmpty());
+ _client.update(ns(), Query(), BSON("$set" << BSON("a.b" << 55)));
+ ASSERT(!_client.findOne(ns(), BSON("a.b" << 55)).isEmpty());
+ }
+};
+
+class SetInPlaceDotted : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{a:{b:'cdef'}}"));
+ _client.update(ns(),
+ Query(),
+ BSON("$set" << BSON("a.b"
+ << "llll")));
+ ASSERT(!_client.findOne(ns(),
+ BSON("a.b"
+ << "llll")).isEmpty());
+ }
+};
+
+class SetRecreateDotted : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:{b:'cdef'}}"));
+ _client.update(ns(),
+ Query(),
+ BSON("$set" << BSON("a.b"
+ << "lllll")));
+ ASSERT(_client.findOne(ns(),
+ BSON("a.b"
+ << "lllll")).woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) ==
+ 0);
+ }
+};
+
+class SetMissingDotted : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ _client.update(ns(),
+ BSONObj(),
+ BSON("$set" << BSON("a.b"
+ << "lllll")));
+ ASSERT(_client.findOne(ns(),
+ BSON("a.b"
+ << "lllll")).woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) ==
+ 0);
+ }
+};
+
+class SetAdjacentDotted : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:{c:4}}"));
+ _client.update(ns(),
+ Query(),
+ BSON("$set" << BSON("a.b"
+ << "lllll")));
+ ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(),
+ BSON("a.b"
+ << "lllll"))),
+ mutablebson::unordered(fromjson("{'_id':0,a:{b:'lllll',c:4}}")));
+ }
+};
+
+class IncMissing : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ _client.update(ns(), Query(), BSON("$inc" << BSON("f" << 3.0)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,f:3}")) == 0);
+ }
+};
+
+class MultiInc : public SetBase {
+public:
+ string s() {
+ stringstream ss;
+ auto_ptr<DBClientCursor> cc = _client.query(ns(), Query().sort(BSON("_id" << 1)));
+ bool first = true;
+ while (cc->more()) {
+ if (first)
+ first = false;
+ else
+ ss << ",";
+
+ BSONObj o = cc->next();
+ ss << o["x"].numberInt();
+ }
+ return ss.str();
+ }
+
+ void run() {
+ _client.insert(ns(), BSON("_id" << 1 << "x" << 1));
+ _client.insert(ns(), BSON("_id" << 2 << "x" << 5));
+
+ ASSERT_EQUALS("1,5", s());
+
+ _client.update(ns(), BSON("_id" << 1), BSON("$inc" << BSON("x" << 1)));
+ ASSERT_EQUALS("2,5", s());
+
+ _client.update(ns(), BSONObj(), BSON("$inc" << BSON("x" << 1)));
+ ASSERT_EQUALS("3,5", s());
+
+ _client.update(ns(), BSONObj(), BSON("$inc" << BSON("x" << 1)), false, true);
+ ASSERT_EQUALS("4,6", s());
+ }
+};
+
+class UnorderedNewSet : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("f.g.h" << 3.0 << "f.g.a" << 2.0)));
+ ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(), Query())),
+ mutablebson::unordered(fromjson("{'_id':0,f:{g:{a:2,h:3}}}")));
+ }
+};
+
+class UnorderedNewSetAdjacent : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ _client.update(ns(), BSONObj(), BSON("$set" << BSON("f.g.h.b" << 3.0 << "f.g.a.b" << 2.0)));
+ ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(), Query())),
+ mutablebson::unordered(fromjson("{'_id':0,f:{g:{a:{b:2},h:{b:3}}}}")));
+ }
+};
+
+class ArrayEmbeddedSet : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,z:[4,'b']}"));
+ _client.update(ns(),
+ Query(),
+ BSON("$set" << BSON("z.0"
+ << "a")));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,z:['a','b']}"));
+ }
+};
+
+class AttemptEmbedInExistingNum : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:1}"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a.b" << 1)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:1}")) == 0);
+ }
+};
+
+class AttemptEmbedConflictsWithOtherSet : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a" << 2 << "a.b" << 1)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0}"));
+ }
+};
+
+class ModMasksEmbeddedConflict : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:{b:2}}"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a" << 2 << "a.b" << 1)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:2}}")) == 0);
+ }
+};
+
+class ModOverwritesExistingObject : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:{b:2}}"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a" << BSON("c" << 2))));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{c:2}}")) == 0);
+ }
+};
+
+class InvalidEmbeddedSet : public Fail {
+public:
+ virtual void doIt() {
+ _client.update(ns(), Query(), BSON("$set" << BSON("a." << 1)));
+ }
+};
+
+class UpsertMissingEmbedded : public SetBase {
+public:
+ void run() {
+ _client.update(ns(), Query(), BSON("$set" << BSON("a.b" << 1)), true);
+ ASSERT(!_client.findOne(ns(), QUERY("a.b" << 1)).isEmpty());
+ }
+};
+
+class Push : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1]}"));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << 5)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,5]}"));
+ }
+};
+
+class PushInvalidEltType : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:1}"));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << 5)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:1}")) == 0);
+ }
+};
+
+class PushConflictsWithOtherMod : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1]}"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a" << 1) << "$push" << BSON("a" << 5)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1]}")) == 0);
+ }
+};
+
+class PushFromNothing : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << 5)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[5]}"));
+ }
+};
+
+class PushFromEmpty : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[]}"));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << 5)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[5]}")) == 0);
+ }
+};
+
+class PushInsideNothing : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a.b" << 5)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:[5]}}")) == 0);
+ }
+};
+
+class CantPushInsideOtherMod : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ _client.update(
+ ns(), Query(), BSON("$set" << BSON("a" << BSONObj()) << "$push" << BSON("a.b" << 5)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0}")) == 0);
+ }
+};
+
+class CantPushTwice : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[]}"));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << 4) << "$push" << BSON("a" << 5)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[]}")) == 0);
+ }
+};
+
+class SetEncapsulationConflictsWithExistingType : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:{b:4}}"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a.b.c" << 4.0)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:4}}")) == 0);
+ }
+};
+
+class CantPushToParent : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:{b:4}}"));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << 4.0)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:4}}")) == 0);
+ }
+};
+
+class PushEachSimple : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1]}"));
+ // { $push : { a : { $each : [ 2, 3 ] } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(2 << 3));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2,3]}"));
+ }
+};
+
+class PushEachFromEmpty : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[]}"));
+ // { $push : { a : { $each : [ 1, 2, 3 ] } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2,3]}"));
+ }
+};
+
+class PushSliceBelowFull : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1]}"));
+ // { $push : { a : { $each : [ 2 ] , $slice : -3 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(2) << "$slice" << -3);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}"));
+ }
+};
+
+class PushSliceReachedFullExact : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1]}"));
+ // { $push : { a : { $each : [ 2 ] , $slice : -2 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(2) << "$slice" << -2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}"));
+ }
+};
+
+class PushSliceReachedFullWithEach : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1]}"));
+ // { $push : { a : { $each : [ 2 , 3 ] , $slice : -2 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(2 << 3) << "$slice" << -2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ }
+};
+
+class PushSliceReachedFullWithBoth : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}"));
+ // { $push : { a : { $each : [ 3 ] , $slice : -2 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ }
+};
+
+class PushSliceToZero : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}"));
+ // { $push : { a : { $each : [ 3 ] , $slice : 0 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 0);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[]}"));
+ }
+};
+
+class PushSliceToZeroFromNothing : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ // { $push : { a : { $each : [ 3 ] , $slice : 0 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 0);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[]}"));
+ }
+};
+
+class PushSliceFromNothing : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ // { $push : { a : { $each : [ 1 , 2 ] , $slice : -3 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2) << "$slice" << -3);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}"));
+ }
+};
+
+class PushSliceLongerThanSliceFromNothing : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ // { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3) << "$slice" << -2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ }
+};
+
+class PushSliceFromEmpty : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[]}"));
+ // { $push : { a : { $each : [ 1 ] , $slice : -3 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(1) << "$slice" << -3);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1]}"));
+ }
+};
+
+class PushSliceLongerThanSliceFromEmpty : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[]}"));
+ // { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3) << "$slice" << -2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ }
+};
+
+class PushSliceTwoFields : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2],b:[3,4]}"));
+ // { $push: { a: { $each: [ 5 ], $slice: -2 }, b: { $each: [ 6 ], $slice: -1 } } }
+ BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2);
+ BSONObj objB = BSON("$each" << BSON_ARRAY(6) << "$slice" << -1);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "b" << objB)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,5],b:[6]}"));
+ }
+};
+
+class PushSliceAndNormal : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2],b:[3]}"));
+ // { $push : { a : { $each : [ 5 ] , $slice : -2 } , b : 4 } }
+ BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "b" << 4)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,5],b:[3,4]}"));
+ }
+};
+
+class PushSliceTwoFieldsConflict : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1],b:[3]}"));
+ // { $push: { a: { $each: [ 5 ] , $slice: -2 } , a: { $each: [ 6 ] , $slice: -1 } } }
+ BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2);
+ BSONObj other = BSON("$each" << BSON_ARRAY(6) << "$slice" << -1);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "a" << other)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1],b:[3]}")) == 0);
+ }
+};
+
+class PushSliceAndNormalConflict : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1],b:[3]}"));
+ // { $push : { a : { $each : [ 5 ] , $slice : -2 } , a : 4 } }
+ BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "a" << 4)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1],b:[3]}")) == 0);
+ }
+};
+
+class PushSliceInvalidEachType : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}"));
+ // { $push : { a : { $each : 3 , $slice : -2 } } }
+ BSONObj pushObj = BSON("$each" << 3 << "$slice" << -2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
+ }
+};
+
+class PushSliceInvalidSliceType : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}"));
+ // { $push : { a : { $each : [ 3 ], $slice : [ -2 ] } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << BSON_ARRAY(-2));
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
+ }
+};
+
+class PushSliceInvalidSliceValue : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}"));
+ // { $push : { a : { $each : [ 3 ], $slice : 2 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
+ }
+};
+
+
+class PushSliceInvalidSliceDouble : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}"));
+ // { $push : { a : { $each : [ 3 ], $slice : -2.1 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2.1);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
+ }
+};
+
+class PushSliceValidSliceDouble : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}"));
+ // { $push : { a : { $each : [ 3 ], $slice : -2.0 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2.0);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ }
+};
+
+class PushSliceInvalidSlice : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}"));
+ // { $push : { a : { $each : [ 3 ], $xxxx : 2 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$xxxx" << 2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
+ }
+};
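
The PushSlice tests above pin down the $slice contract these updates enforce: the $each values are appended first, a negative $slice then keeps only the trailing |n| elements, $slice:0 empties the array, and positive or non-integral $slice values cause the whole mod to be rejected. As a standalone reference, here is a minimal sketch of that trimming rule in plain C++ (no MongoDB headers; applySlice is a hypothetical name for illustration, not a server API):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Append the $each values, then trim the way a non-positive $slice would.
    std::vector<int> applySlice(std::vector<int> arr, const std::vector<int>& each, int slice) {
        assert(slice <= 0);  // this era of the server only accepts non-positive $slice
        arr.insert(arr.end(), each.begin(), each.end());
        std::size_t keep = std::min(arr.size(), static_cast<std::size_t>(-slice));
        return std::vector<int>(arr.end() - keep, arr.end());
    }

    int main() {
        // Mirrors PushSliceReachedFullWithEach: [1] + $each:[2,3], $slice:-2 -> [2,3]
        assert((applySlice({1}, {2, 3}, -2) == std::vector<int>{2, 3}));
        // Mirrors PushSliceToZero: [1,2] + $each:[3], $slice:0 -> []
        assert(applySlice({1, 2}, {3}, 0).empty());
        return 0;
    }
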
- _client.update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , false , true );
- ASSERT_EQUALS( "4,6" , s() );
-
- }
- };
-
- class UnorderedNewSet : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "f.g.h" << 3.0 << "f.g.a" << 2.0 ) ) );
- ASSERT_EQUALS(
- mutablebson::unordered( _client.findOne( ns(), Query() ) ),
- mutablebson::unordered( fromjson( "{'_id':0,f:{g:{a:2,h:3}}}" ) ) );
- }
- };
-
- class UnorderedNewSetAdjacent : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- _client.update( ns(), BSONObj(), BSON( "$set" << BSON( "f.g.h.b" << 3.0 << "f.g.a.b" << 2.0 ) ) );
- ASSERT_EQUALS(
- mutablebson::unordered( _client.findOne( ns(), Query() ) ),
- mutablebson::unordered( fromjson( "{'_id':0,f:{g:{a:{b:2},h:{b:3}}}}" ) ) );
- }
- };
-
- class ArrayEmbeddedSet : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,z:[4,'b']}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "z.0" << "a" ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,z:['a','b']}" ) );
- }
- };
-
- class AttemptEmbedInExistingNum : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:1}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a.b" << 1 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:1}" ) ) == 0 );
- }
- };
-
- class AttemptEmbedConflictsWithOtherSet : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a" << 2 << "a.b" << 1 ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0}" ) );
- }
- };
-
- class ModMasksEmbeddedConflict : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:{b:2}}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a" << 2 << "a.b" << 1 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:2}}" ) ) == 0 );
- }
- };
-
- class ModOverwritesExistingObject : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:{b:2}}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a" << BSON( "c" << 2 ) ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{c:2}}" ) ) == 0 );
- }
- };
-
- class InvalidEmbeddedSet : public Fail {
- public:
- virtual void doIt() {
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a." << 1 ) ) );
- }
- };
-
- class UpsertMissingEmbedded : public SetBase {
- public:
- void run() {
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a.b" << 1 ) ), true );
- ASSERT( !_client.findOne( ns(), QUERY( "a.b" << 1 ) ).isEmpty() );
- }
- };
-
- class Push : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[1,5]}" ) );
- }
- };
-
- class PushInvalidEltType : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:1}" ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:1}" ) ) == 0 );
- }
- };
-
- class PushConflictsWithOtherMod : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a" << 1 ) <<"$push" << BSON( "a" << 5 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:[1]}" ) ) == 0 );
- }
- };
-
- class PushFromNothing : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[5]}" ) );
- }
- };
-
- class PushFromEmpty : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[]}" ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:[5]}" ) ) == 0 );
- }
- };
-
- class PushInsideNothing : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a.b" << 5 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:[5]}}" ) ) == 0 );
- }
- };
-
- class CantPushInsideOtherMod : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a" << BSONObj() ) << "$push" << BSON( "a.b" << 5 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0}" ) ) == 0 );
- }
- };
-
- class CantPushTwice : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[]}" ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << 4 ) << "$push" << BSON( "a" << 5 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:[]}" ) ) == 0 );
- }
- };
-
- class SetEncapsulationConflictsWithExistingType : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:{b:4}}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a.b.c" << 4.0 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:4}}" ) ) == 0 );
- }
- };
-
- class CantPushToParent : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:{b:4}}" ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << 4.0 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:4}}" ) ) == 0 );
- }
- };
-
- class PushEachSimple : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
- // { $push : { a : { $each : [ 2, 3 ] } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 2 << 3 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[1,2,3]}" ) );
- }
-
- };
-
- class PushEachFromEmpty : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[]}" ) );
- // { $push : { a : { $each : [ 1, 2, 3 ] } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 1 << 2 << 3 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[1,2,3]}" ) );
- }
-
- };
-
- class PushSliceBelowFull : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
- // { $push : { a : { $each : [ 2 ] , $slice : -3 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 2 ) << "$slice" << -3 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[1,2]}" ) );
- }
- };
-
- class PushSliceReachedFullExact : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
- // { $push : { a : { $each : [ 2 ] , $slice : -2 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 2 ) << "$slice" << -2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[1,2]}" ) );
- }
- };
-
- class PushSliceReachedFullWithEach : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
- // { $push : { a : { $each : [ 2 , 3 ] , $slice : -2 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 2 << 3 ) << "$slice" << -2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[2,3]}" ) );
- }
- };
-
- class PushSliceReachedFullWithBoth : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2]}" ) );
- // { $push : { a : { $each : [ 3 ] , $slice : -2 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 3 ) << "$slice" << -2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[2,3]}" ) );
- }
- };
-
- class PushSliceToZero : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2]}" ) );
- // { $push : { a : { $each : [ 3 ] , $slice : 0 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 3 ) << "$slice" << 0 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[]}" ) );
- }
- };
-
- class PushSliceToZeroFromNothing : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- // { $push : { a : { $each : [ 3 ] , $slice : 0 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 3 ) << "$slice" << 0 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[]}" ) );
- }
- };
-
- class PushSliceFromNothing : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- // { $push : { a : { $each : [ 1 , 2 ] , $slice : -3 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 1 << 2 ) << "$slice" << -3 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[1,2]}" ) );
- }
- };
-
- class PushSliceLongerThanSliceFromNothing : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- // { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 1 << 2 << 3 ) << "$slice" << -2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[2,3]}" ) );
- }
- };
-
- class PushSliceFromEmpty : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[]}" ) );
- // { $push : { a : { $each : [ 1 ] , $slice : -3 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 1 ) << "$slice" << -3 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[1]}" ) );
- }
- };
-
- class PushSliceLongerThanSliceFromEmpty : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[]}" ) );
- // { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 1 << 2 << 3 ) << "$slice" << -2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[2,3]}" ) );
- }
- };
-
- class PushSliceTwoFields : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2],b:[3,4]}" ) );
- // { $push: { a: { $each: [ 5 ] , $slice : -2 }, { b: $each: [ 6 ] , $slice: -1 } } }
- BSONObj objA = BSON( "$each" << BSON_ARRAY( 5 ) << "$slice" << -2 );
- BSONObj objB = BSON( "$each" << BSON_ARRAY( 6 ) << "$slice" << -1 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << objA << "b" << objB ) ) );
- ASSERT_EQUALS( _client.findOne( ns(), Query() ) , fromjson("{'_id':0,a:[2,5],b:[6]}"));
- }
- };
-
- class PushSliceAndNormal : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2],b:[3]}" ) );
- // { $push : { a : { $each : [ 5 ] , $slice : -2 } , { b : 4 } }
- BSONObj objA = BSON( "$each" << BSON_ARRAY( 5 ) << "$slice" << -2 );
- _client.update( ns(), Query(), BSON("$push" << BSON("a" << objA << "b" << 4)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()) , fromjson("{'_id':0,a:[2,5],b:[3,4]}"));
- }
- };
-
- class PushSliceTwoFieldsConflict : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1],b:[3]}" ) );
- // { $push: { a: { $each: [ 5 ] , $slice: -2 } , { a: $each: [ 6 ] , $slice: -1 } } }
- BSONObj objA = BSON( "$each" << BSON_ARRAY( 5 ) << "$slice" << -2 );
- BSONObj other = BSON( "$each" << BSON_ARRAY( 6 ) << "$slice" << -1 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << objA << "a" << other ) ) );
- ASSERT(_client.findOne( ns(), Query()).woCompare(fromjson("{'_id':0,a:[1],b:[3]}"))==0);
- }
- };
-
- class PushSliceAndNormalConflict : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1],b:[3]}" ) );
- // { $push : { a : { $each : [ 5 ] , $slice : -2 } , { a : 4 } } }
- BSONObj objA = BSON( "$each" << BSON_ARRAY( 5 ) << "$slice" << -2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << objA << "a" << 4 ) ) );
- ASSERT(_client.findOne( ns(), Query()).woCompare(fromjson("{'_id':0,a:[1],b:[3]}"))==0);
- }
- };
-
- class PushSliceInvalidEachType : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2]}" ) );
- // { $push : { a : { $each : 3 , $slice : -2 } } }
- BSONObj pushObj = BSON( "$each" << 3 << "$slice" << -2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT( _client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
- }
- };
-
- class PushSliceInvalidSliceType : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2]}" ) );
- // { $push : { a : { $each : [ 3 ], $slice : [ -2 ] } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY(3) << "$slice" << BSON_ARRAY(-2) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
- }
- };
-
- class PushSliceInvalidSliceValue : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2]}" ) );
- // { $push : { a : { $each : [ 3 ], $slice : 2 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY(3) << "$slice" << 2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
- }
- };
-
-
- class PushSliceInvalidSliceDouble : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2]}" ) );
- // { $push : { a : { $each : [ 3 ], $slice : -2.1 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY(3) << "$slice" << -2.1 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
- }
- };
-
- class PushSliceValidSliceDouble : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2]}" ) );
- // { $push : { a : { $each : [ 3 ], $slice : -2.0 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY(3) << "$slice" << -2.0 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT_EQUALS(_client.findOne(ns(), Query()) , fromjson("{'_id':0,a:[2,3]}"));
- }
- };
-
- class PushSliceInvalidSlice : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:[1,2]}" ) );
- // { $push : { a : { $each : [ 3 ], $xxxx : 2 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY(3) << "$xxxx" << 2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "a" << pushObj ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0);
- }
- };
-
- //
- // We'd like to test the ability of $push with $sort in the following sequence of tests. We
- // try to enumerate all the possibilities of where the final element would come from: the
- // document, the $push itself, or both.
- //
-
- class PushSortBase : public ClientBase {
- public:
- ~PushSortBase() {
- _client.dropCollection( ns() );
- }
-
- protected:
- enum UpdateType {
- // Sorts ascending and slices the back of the array.
- TOPK_ASC = 0,
-
- // Sorts descending and slices the front of the array.
- TOPK_DESC = 1,
-
- // Sorts ascending and slices the back of the array.
- BOTTOMK_ASC = 2,
-
- // Sorts descending and slices the front of the array.
- BOTTOMK_DESC = 3
- };
-
- const char* ns() {
- return "unittest.updatetests.PushSortBase";
- }
-
- void setParams( const BSONArray& fields,
- const BSONArray& values,
- const BSONArray& sort,
- int size ) {
- _fieldNames = fields;
- _fieldValues = values;
- _sortFields = sort;
- _sliceSize = size;
- }
+//
+// The following sequence of tests exercises $push with $sort. We try to enumerate all the
+// possibilities for where the final elements could come from: the existing document, the
+// $push itself, or both. (A standalone sketch of the sort-then-slice rule follows below.)
+//
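
As a concrete reference for the expectations below, this is a minimal sketch of the sort-then-slice rule that the TOPK variants exercise, in plain C++11 (Elt and pushSortSlice are hypothetical names used only for illustration; the server builds the real update via BSONObjBuilder in PushSortBase::getUpdate below): append the $each documents, sort the combined array by the $sort pattern, then let a negative $slice keep the trailing k elements.

    #include <algorithm>
    #include <vector>

    struct Elt { int a, b; };

    // Hypothetical stand-in for { $push: { x: { $each: <each>, $slice: -k, $sort: { b: 1 } } } }
    std::vector<Elt> pushSortSlice(std::vector<Elt> arr, const std::vector<Elt>& each, int k) {
        arr.insert(arr.end(), each.begin(), each.end());
        std::sort(arr.begin(), arr.end(),
                  [](const Elt& l, const Elt& r) { return l.b < r.b; });     // $sort: { b: 1 }
        std::size_t keep = std::min(arr.size(), static_cast<std::size_t>(k)); // $slice: -k keeps the tail
        return std::vector<Elt>(arr.end() - keep, arr.end());
    }

For example, pushSortSlice({{1, 1}}, {{2, 2}}, 3) yields [{a:1,b:1},{a:2,b:2}], which is exactly what PushSortBelowFull expects for TOPK_ASC.
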
- /**
- * Generates the update expression portion of an update command given one of the
- * possible types of update.
- */
- BSONObj getUpdate( int updateType ) {
- BSONObjBuilder updateBuilder;
- BSONObjBuilder pushBuilder( updateBuilder.subobjStart( "$push" ) );
- BSONObjBuilder fieldBuilder( pushBuilder.subobjStart( "x" ) );
-
- // Builds $each: [ {a:1,b:1,...}, {a:2,b:2,...}, ... ]
- BSONArrayBuilder eachBuilder( fieldBuilder.subarrayStart( "$each" ) );
- BSONObjIterator itVal( _fieldValues );
- while ( itVal.more() ) {
- BSONObjBuilder eachObjBuilder;
- BSONElement val = itVal.next();
- BSONObjIterator itName( _fieldNames );
- while ( itName.more() ) {
- BSONElement name = itName.next();
- eachObjBuilder.append( name.String(), val.Int() );
- }
- eachBuilder.append( eachObjBuilder.done() );
- }
- eachBuilder.done();
-
- // Builds $slice portion.
- fieldBuilder.append( "$slice",
- updateType < 2 ? -_sliceSize : _sliceSize);
-
- // Builds $sort: <sort pattern> portion
- BSONObjBuilder patternBuilder( fieldBuilder.subobjStart( "$sort" ) );
- BSONObjIterator itSort( _sortFields );
- while ( itSort.more() ) {
- BSONElement sortField = itSort.next();
- patternBuilder.append( sortField.String(),
- updateType%2 ? -1 : 1 );
+class PushSortBase : public ClientBase {
+public:
+ ~PushSortBase() {
+ _client.dropCollection(ns());
+ }
+
+protected:
+ enum UpdateType {
+ // Sorts ascending and keeps the tail of the array (negative $slice).
+ TOPK_ASC = 0,
+
+ // Sorts descending and keeps the tail of the array (negative $slice).
+ TOPK_DESC = 1,
+
+ // Sorts ascending and keeps the head of the array (positive $slice).
+ BOTTOMK_ASC = 2,
+
+ // Sorts descending and keeps the head of the array (positive $slice).
+ BOTTOMK_DESC = 3
+ };
+
+ const char* ns() {
+ return "unittest.updatetests.PushSortBase";
+ }
+
+ void setParams(const BSONArray& fields,
+ const BSONArray& values,
+ const BSONArray& sort,
+ int size) {
+ _fieldNames = fields;
+ _fieldValues = values;
+ _sortFields = sort;
+ _sliceSize = size;
+ }
+
+ /**
+ * Generates the update expression portion of an update command given one of the
+ * possible types of update.
+ */
+ BSONObj getUpdate(int updateType) {
+ BSONObjBuilder updateBuilder;
+ BSONObjBuilder pushBuilder(updateBuilder.subobjStart("$push"));
+ BSONObjBuilder fieldBuilder(pushBuilder.subobjStart("x"));
+
+ // Builds $each: [ {a:1,b:1,...}, {a:2,b:2,...}, ... ]
+ BSONArrayBuilder eachBuilder(fieldBuilder.subarrayStart("$each"));
+ BSONObjIterator itVal(_fieldValues);
+ while (itVal.more()) {
+ BSONObjBuilder eachObjBuilder;
+ BSONElement val = itVal.next();
+ BSONObjIterator itName(_fieldNames);
+ while (itName.more()) {
+ BSONElement name = itName.next();
+ eachObjBuilder.append(name.String(), val.Int());
}
- patternBuilder.done();
-
- fieldBuilder.done();
- pushBuilder.done();
-
- return updateBuilder.obj();
- }
-
- void check( BSONObj expected ) {
- std::cout << expected.toString() << std::endl;
- std::cout << _client.findOne( ns(), Query() ) << std::endl;
- ASSERT( _client.findOne( ns(), Query() ).woCompare( expected ) == 0 );
- }
-
- private:
- BSONArray _fieldNames;
- BSONArray _fieldValues;
- BSONArray _sortFields;
- int _sliceSize;
- };
-
- class PushSortBelowFull : public PushSortBase {
- public:
- void run() {
- // With the following parameters
- // fields in values in
- // the each array each array field to sort size
- setParams( BSON_ARRAY( "a" << "b" ), BSON_ARRAY( 2 ), BSON_ARRAY( "b" ), 3 );
-
- // Generates the four variations below (but for now we're only using negative slice).
- // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-3, $sort: { b:1 } } }
- // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-3, $sort: { b:-1 } } }
- // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:3, $sort: { b:1 } } }
- // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:3, $sort: { b:-1 } } }
-
- for ( int i = 0; i < 2; i++ ) { // i < 4 when we have positive $slice
- _client.dropCollection( ns() );
- _client.insert( ns(), fromjson( "{'_id':0,x:[{a:1,b:1}]}" ) );
-
- BSONObj result;
- BSONObj expected;
- switch ( i ) {
+ eachBuilder.append(eachObjBuilder.done());
+ }
+ eachBuilder.done();
+
+ // Builds $slice portion.
+ fieldBuilder.append("$slice", updateType < 2 ? -_sliceSize : _sliceSize);
+
+ // Builds $sort: <sort pattern> portion
+ BSONObjBuilder patternBuilder(fieldBuilder.subobjStart("$sort"));
+ BSONObjIterator itSort(_sortFields);
+ while (itSort.more()) {
+ BSONElement sortField = itSort.next();
+ patternBuilder.append(sortField.String(), updateType % 2 ? -1 : 1);
+ }
+ patternBuilder.done();
+
+ fieldBuilder.done();
+ pushBuilder.done();
+
+ return updateBuilder.obj();
+ }
+
+ void check(BSONObj expected) {
+ std::cout << expected.toString() << std::endl;
+ std::cout << _client.findOne(ns(), Query()) << std::endl;
+ ASSERT(_client.findOne(ns(), Query()).woCompare(expected) == 0);
+ }
+
+private:
+ BSONArray _fieldNames;
+ BSONArray _fieldValues;
+ BSONArray _sortFields;
+ int _sliceSize;
+};
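
For orientation, given setParams(BSON_ARRAY("a" << "b"), BSON_ARRAY(2), BSON_ARRAY("b"), 3) as in the first test below, getUpdate(TOPK_ASC) assembles an update object equivalent to this literal (a sketch written with the same BSON macros the tests already use):

    // { $push: { x: { $each: [ {a:2,b:2} ], $slice: -3, $sort: { b: 1 } } } }
    BSONObj update = BSON(
        "$push" << BSON("x" << BSON("$each" << BSON_ARRAY(BSON("a" << 2 << "b" << 2))
                                            << "$slice" << -3
                                            << "$sort" << BSON("b" << 1))));
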
+
+class PushSortBelowFull : public PushSortBase {
+public:
+ void run() {
+ // With the following parameters
+ // fields in values in
+ // the each array each array field to sort size
+ setParams(BSON_ARRAY("a"
+ << "b"),
+ BSON_ARRAY(2),
+ BSON_ARRAY("b"),
+ 3);
+
+ // Generates the four variations below (but for now we're only using negative slice).
+ // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-3, $sort: { b:1 } } }
+ // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-3, $sort: { b:-1 } } }
+ // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:3, $sort: { b:1 } } }
+ // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:3, $sort: { b:-1 } } }
+
+ for (int i = 0; i < 2; i++) { // i < 4 when we have positive $slice
+ _client.dropCollection(ns());
+ _client.insert(ns(), fromjson("{'_id':0,x:[{a:1,b:1}]}"));
+
+ BSONObj result;
+ BSONObj expected;
+ switch (i) {
case TOPK_ASC:
case BOTTOMK_ASC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
+ ASSERT_EQUALS(result, expected);
break;
case TOPK_DESC:
case BOTTOMK_DESC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}" ) ;
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
+ ASSERT_EQUALS(result, expected);
break;
- }
}
}
- };
-
- class PushSortReachedFullExact : public PushSortBase {
- public:
- void run() {
- // With the following parameters
- // fields in values in
- // the each array each array field to sort size
- setParams(BSON_ARRAY( "a"<<"b" ), BSON_ARRAY( 2 ), BSON_ARRAY( "b" ), 2 );
-
- // Generates the four variations below (but for now we're only using negative slice).
- // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-2, $sort: { b:1 } } }
- // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-2, $sort: { b:-1 } } }
- // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:2, $sort: { b:1 } } }
- // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:2, $sort: { b:-1 } } }
-
- for ( int i = 0; i < 2; i++ ) { // i < 4 when we have positive $slice
- _client.dropCollection( ns() );
- _client.insert( ns(), fromjson( "{'_id':0,x:[{a:1,b:1}]}" ) );
-
- BSONObj result;
- BSONObj expected;
- switch (i) {
+ }
+};
+
+class PushSortReachedFullExact : public PushSortBase {
+public:
+ void run() {
+ // With the following parameters
+ // fields in values in
+ // the each array each array field to sort size
+ setParams(BSON_ARRAY("a"
+ << "b"),
+ BSON_ARRAY(2),
+ BSON_ARRAY("b"),
+ 2);
+
+ // Generates the four variations below (but for now we're only using negative slice).
+ // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-2, $sort: { b:1 } } }
+ // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-2, $sort: { b:-1 } } }
+ // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:2, $sort: { b:1 } } }
+ // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:2, $sort: { b:-1 } } }
+
+ for (int i = 0; i < 2; i++) { // i < 4 when we have positive $slice
+ _client.dropCollection(ns());
+ _client.insert(ns(), fromjson("{'_id':0,x:[{a:1,b:1}]}"));
+
+ BSONObj result;
+ BSONObj expected;
+ switch (i) {
case TOPK_ASC:
case BOTTOMK_ASC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
+ ASSERT_EQUALS(result, expected);
break;
case TOPK_DESC:
case BOTTOMK_DESC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
+ ASSERT_EQUALS(result, expected);
break;
- }
}
}
- };
-
- class PushSortReachedFullWithBoth : public PushSortBase {
- public:
- void run() {
- // With the following parameters
- // fields in values in
- // the each array each array field to sort size
- setParams( BSON_ARRAY( "a"<<"b" ), BSON_ARRAY( 2 ), BSON_ARRAY( "b" ), 2 );
-
- // Generates the four variations below (but for now we're only using negative slice).
- // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-2, $sort: { b:1 } } }
- // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-2, $sort: { b:-1 } } }
- // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:2, $sort: { b:1 } } }
- // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:2, $sort: { b:-1 } } }
-
- for ( int i = 0; i < 2; i++ ) { // i < 4 when we have positive $slice
- _client.dropCollection( ns() );
- _client.insert( ns(), fromjson( "{'_id':0,x:[{a:1,b:1},{a:3,b:3}]}" ) );
-
- BSONObj result;
- BSONObj expected;
- switch ( i ) {
+ }
+};
+
+class PushSortReachedFullWithBoth : public PushSortBase {
+public:
+ void run() {
+ // With the following parameters
+ // fields in values in
+ // the each array each array field to sort size
+ setParams(BSON_ARRAY("a"
+ << "b"),
+ BSON_ARRAY(2),
+ BSON_ARRAY("b"),
+ 2);
+
+ // Generates the four variations below (but for now we're only using negative slice).
+ // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-2, $sort: { b:1 } } }
+ // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:-2, $sort: { b:-1 } } }
+ // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:2, $sort: { b:1 } } }
+ // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:2, $sort: { b:-1 } } }
+
+ for (int i = 0; i < 2; i++) { // i < 4 when we have positive $slice
+ _client.dropCollection(ns());
+ _client.insert(ns(), fromjson("{'_id':0,x:[{a:1,b:1},{a:3,b:3}]}"));
+
+ BSONObj result;
+ BSONObj expected;
+ switch (i) {
case TOPK_ASC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}");
+ ASSERT_EQUALS(result, expected);
break;
case TOPK_DESC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
+ ASSERT_EQUALS(result, expected);
break;
case BOTTOMK_ASC:
case BOTTOMK_DESC:
// Implement me.
break;
- }
- }
- }
- };
-
- class PushSortToZero : public PushSortBase {
- public:
- void run() {
- // With the following parameters
- // fields in values in
- // the each array each array field to sort size
- setParams( BSON_ARRAY( "a"<<"b" ), BSON_ARRAY( 2 ), BSON_ARRAY( "b" ), 0 );
-
- // Generates the four variations below (but for now we're only using negative slice).
- // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:1 } } }
- // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:-1 } } }
- // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:1 } } }
- // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:-1 } } }
-
- for ( int i = 0; i < 2; i++ ) { // i < 4 when we have positive $slice
- _client.dropCollection( ns() );
- _client.insert( ns(), fromjson( "{'_id':0,x:[{a:1,b:1},{a:3,b:3}]}" ) );
-
- BSONObj result;
- BSONObj expected;
-
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[]}" );
- ASSERT_EQUALS( result, expected );
- }
- }
- };
-
- class PushSortToZeroFromNothing : public PushSortBase {
- public:
- void run() {
- // With the following parameters
- // fields in values in
- // the each array each array field to sort size
- setParams( BSON_ARRAY( "a"<<"b" ), BSON_ARRAY( 2 ), BSON_ARRAY( "b" ), 0 );
-
- // Generates the four variations below (but for now we're only using negative slice).
- // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:1 } } }
- // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:-1 } } }
- // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:1 } } }
- // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:-1 } } }
-
- for ( int i = 0; i < 2; i++ ) { // i < 4 when we have positive $slice
- _client.dropCollection( ns() );
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
-
- BSONObj result;
- BSONObj expected;
-
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[]}" );
- ASSERT_EQUALS( result, expected );
}
}
- };
-
- class PushSortFromNothing : public PushSortBase {
- public:
- void run() {
- // With the following parameters
- // fields in values in
- // the each array each array field to sort size
- setParams(BSON_ARRAY( "a"<<"b" ), BSON_ARRAY( 2 << 1 ), BSON_ARRAY( "b" ), 2 );
-
- // Generates the four variations below (but for now we're only using negative slice).
- // <genarr> = [ {a:2,b:2}, {a:1,b:1} ]
- // Generates the four variations below
- // TOPK_ASC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:1 } } }
- // TOPK_DESC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:-1 } } }
- // BOTTOMK_ASC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:1 } } }
- // BOTTOMK_DESC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:-1 } } }
-
- for ( int i = 0; i < 2; i++ ) { // i < 4 when we have positive $slice
- _client.dropCollection( ns() );
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
-
- BSONObj result;
- BSONObj expected;
- switch (i) {
+ }
+};
+
+class PushSortToZero : public PushSortBase {
+public:
+ void run() {
+ // With the following parameters
+ // fields in values in
+ // the each array each array field to sort size
+ setParams(BSON_ARRAY("a"
+ << "b"),
+ BSON_ARRAY(2),
+ BSON_ARRAY("b"),
+ 0);
+
+ // Generates the four variations below (but for now we're only using negative slice).
+ // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:1 } } }
+ // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:-1 } } }
+ // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:1 } } }
+ // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:-1 } } }
+
+ for (int i = 0; i < 2; i++) { // i < 4 when we have positive $slice
+ _client.dropCollection(ns());
+ _client.insert(ns(), fromjson("{'_id':0,x:[{a:1,b:1},{a:3,b:3}]}"));
+
+ BSONObj result;
+ BSONObj expected;
+
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[]}");
+ ASSERT_EQUALS(result, expected);
+ }
+ }
+};
+
+class PushSortToZeroFromNothing : public PushSortBase {
+public:
+ void run() {
+ // With the following parameters
+ // fields in values in
+ // the each array each array field to sort size
+ setParams(BSON_ARRAY("a"
+ << "b"),
+ BSON_ARRAY(2),
+ BSON_ARRAY("b"),
+ 0);
+
+ // Generates the four variations below (but for now we're only using negative slice).
+ // TOPK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:1 } } }
+ // TOPK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:-1 } } }
+ // BOTTOMK_ASC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:1 } } }
+ // BOTTOMK_DESC: $push: { x: { $each: [ {a:2,b:2} ], $slice:0, $sort: { b:-1 } } }
+
+ for (int i = 0; i < 2; i++) { // i < 4 when we have positive $slice
+ _client.dropCollection(ns());
+ _client.insert(ns(), fromjson("{'_id':0}"));
+
+ BSONObj result;
+ BSONObj expected;
+
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[]}");
+ ASSERT_EQUALS(result, expected);
+ }
+ }
+};
+
+class PushSortFromNothing : public PushSortBase {
+public:
+ void run() {
+ // With the following parameters
+ // fields in values in
+ // the each array each array field to sort size
+ setParams(BSON_ARRAY("a"
+ << "b"),
+ BSON_ARRAY(2 << 1),
+ BSON_ARRAY("b"),
+ 2);
+
+ // Generates the four variations below (but for now we're only using negative slice).
+ // <genarray> = [ {a:2,b:2}, {a:1,b:1} ]
+ // TOPK_ASC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:1 } } }
+ // TOPK_DESC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:-1 } } }
+ // BOTTOMK_ASC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:1 } } }
+ // BOTTOMK_DESC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:-1 } } }
+
+ for (int i = 0; i < 2; i++) { // i < 4 when we have positive $slice
+ _client.dropCollection(ns());
+ _client.insert(ns(), fromjson("{'_id':0}"));
+
+ BSONObj result;
+ BSONObj expected;
+ switch (i) {
case TOPK_ASC:
case BOTTOMK_ASC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
+ ASSERT_EQUALS(result, expected);
break;
case TOPK_DESC:
case BOTTOMK_DESC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
+ ASSERT_EQUALS(result, expected);
break;
- }
}
}
- };
-
- class PushSortLongerThanSliceFromNothing : public PushSortBase {
- public:
- void run() {
- // With the following parameters
- // fields in values in
- // the each array each array field to sort size
- setParams(BSON_ARRAY( "a"<<"b" ), BSON_ARRAY( 2 << 1 << 3), BSON_ARRAY( "b" ), 2 );
-
- // Generates the four variations below (but for now we're only using negative slice).
- // <genarr> = [ {a:2,b:2}, {a:1,b:1}, {a:3,b:3} ]
- // TOPK_ASC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:1 } } }
- // TOPK_DESC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:-1 } } }
- // BOTTOMK_ASC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:1 } } }
- // BOTTOMK_DESC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:-1 } } }
-
- for ( int i = 0; i < 2; i++ ) { // i < 4 when we have positive $slice
- _client.dropCollection( ns() );
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
-
- BSONObj result;
- BSONObj expected;
- switch (i) {
+ }
+};
+
+class PushSortLongerThanSliceFromNothing : public PushSortBase {
+public:
+ void run() {
+ // With the following parameters
+ // fields in values in
+ // the each array each array field to sort size
+ setParams(BSON_ARRAY("a"
+ << "b"),
+ BSON_ARRAY(2 << 1 << 3),
+ BSON_ARRAY("b"),
+ 2);
+
+ // Generates the four variations below (but for now we're only using negative slice).
+ // <genarray> = [ {a:2,b:2}, {a:1,b:1}, {a:3,b:3} ]
+ // TOPK_ASC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:1 } } }
+ // TOPK_DESC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:-1 } } }
+ // BOTTOMK_ASC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:1 } } }
+ // BOTTOMK_DESC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:-1 } } }
+
+ for (int i = 0; i < 2; i++) { // i < 4 when we have positive $slice
+ _client.dropCollection(ns());
+ _client.insert(ns(), fromjson("{'_id':0}"));
+
+ BSONObj result;
+ BSONObj expected;
+ switch (i) {
case TOPK_ASC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}");
+ ASSERT_EQUALS(result, expected);
break;
case TOPK_DESC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
+ ASSERT_EQUALS(result, expected);
break;
case BOTTOMK_ASC:
case BOTTOMK_DESC:
// Implement me.
break;
- }
}
}
- };
-
- class PushSortFromEmpty : public PushSortBase {
- public:
- void run() {
- // With the following parameters
- // fields in values in
- // the each array each array field to sort size
- setParams(BSON_ARRAY( "a"<<"b" ), BSON_ARRAY( 2 << 1 ), BSON_ARRAY( "b" ), 2 );
-
- // Generates the four variations below (but for now we're only using negative slice).
- // <genarr> = [ {a:2,b:2}, {a:1,b:1} ]
- // TOPK_ASC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:1 } } }
- // TOPK_DESC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:-1 } } }
- // BOTTOMK_ASC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:1 } } }
- // BOTTOMK_DESC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:-1 } } }
-
- for ( int i = 0; i < 2; i++ ) { // i < 4 when we have positive $slice
- _client.dropCollection( ns() );
- _client.insert( ns(), fromjson( "{'_id':0,x:[]}" ) );
-
- BSONObj result;
- BSONObj expected;
- switch (i) {
+ }
+};
+
+class PushSortFromEmpty : public PushSortBase {
+public:
+ void run() {
+ // With the following parameters
+ // fields in values in
+ // the each array each array field to sort size
+ setParams(BSON_ARRAY("a"
+ << "b"),
+ BSON_ARRAY(2 << 1),
+ BSON_ARRAY("b"),
+ 2);
+
+ // Generates the four variations below (but for now we're only using negative slice).
+ // <genarray> = [ {a:2,b:2}, {a:1,b:1} ]
+ // TOPK_ASC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:1 } } }
+ // TOPK_DESC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:-1 } } }
+ // BOTTOMK_ASC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:1 } } }
+ // BOTTOMK_DESC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:-1 } } }
+
+ for (int i = 0; i < 2; i++) { // i < 4 when we have positive $slice
+ _client.dropCollection(ns());
+ _client.insert(ns(), fromjson("{'_id':0,x:[]}"));
+
+ BSONObj result;
+ BSONObj expected;
+ switch (i) {
case TOPK_ASC:
case BOTTOMK_ASC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
+ ASSERT_EQUALS(result, expected);
break;
case TOPK_DESC:
case BOTTOMK_DESC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
+ ASSERT_EQUALS(result, expected);
break;
- }
}
}
- };
-
- class PushSortLongerThanSliceFromEmpty : public PushSortBase {
- public:
- void run() {
- // With the following parameters
- // fields in values in
- // the each array each array field to sort size
- setParams(BSON_ARRAY( "a"<<"b" ), BSON_ARRAY( 2 << 1 << 3), BSON_ARRAY( "b" ), 2 );
-
- // Generates the four variations below (but for now we're only using negative slice).
- // <genarr> = [ {a:2,b:2}, {a:1,b:1}, {a:3,b:3} ]
- // TOPK_ASC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:1 } } }
- // TOPK_DESC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:-1 } } }
- // BOTTOMK_ASC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:1 } } }
- // BOTTOMK_DESC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:-1 } } }
-
- for ( int i = 0; i < 2; i++ ) { // i < 4 when we have positive $slice
- _client.dropCollection( ns() );
- _client.insert( ns(), fromjson( "{'_id':0,x:[]}" ) );
-
- BSONObj result;
- BSONObj expected;
- switch (i) {
+ }
+};
+
+class PushSortLongerThanSliceFromEmpty : public PushSortBase {
+public:
+ void run() {
+ // With the following parameters: fields in the each array, values in
+ // each array, field to sort, and size.
+ setParams(BSON_ARRAY("a"
+ << "b"),
+ BSON_ARRAY(2 << 1 << 3),
+ BSON_ARRAY("b"),
+ 2);
+
+ // Generates the four variations below (but for now we're only using negative slice).
+ // <genarr> = [ {a:2,b:2}, {a:1,b:1}, {a:3,b:3} ]
+ // TOPK_ASC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:1 } } }
+ // TOPK_DESC: $push: { x: { $each: [ <genarray> ], $slice:-2, $sort: { b:-1 } } }
+ // BOTTOMK_ASC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:1 } } }
+ // BOTTOMK_DESC: $push: { x: { $each: [ <genarray> ], $slice:2, $sort: { b:-1 } } }
+
+ for (int i = 0; i < 2; i++) { // i < 4 when we have positive $slice
+ _client.dropCollection(ns());
+ _client.insert(ns(), fromjson("{'_id':0,x:[]}"));
+
+ BSONObj result;
+ BSONObj expected;
+ switch (i) {
case TOPK_ASC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}");
+ ASSERT_EQUALS(result, expected);
break;
case TOPK_DESC:
- _client.update( ns(), Query(), getUpdate(i) );
- result = _client.findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}" );
- ASSERT_EQUALS( result, expected );
+ _client.update(ns(), Query(), getUpdate(i));
+ result = _client.findOne(ns(), Query());
+ expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
+ ASSERT_EQUALS(result, expected);
break;
case BOTTOMK_ASC:
case BOTTOMK_DESC:
// Implement me.
break;
- }
- }
- }
- };
-
- namespace {
-
- /**
- * Comparator between two BSONObjects that takes in consideration only the keys and
- * direction described in the sort pattern.
- *
- * TODO: This was pulled from update_internal.h, we should verify that these tests work
- * with the new update framework $push sorter.
- */
- struct ProjectKeyCmp {
- BSONObj sortPattern;
-
- ProjectKeyCmp( BSONObj pattern ) : sortPattern( pattern) {}
-
- int operator()( const BSONObj& left, const BSONObj& right ) const {
- BSONObj keyLeft = left.extractFields( sortPattern, true );
- BSONObj keyRight = right.extractFields( sortPattern, true );
- return keyLeft.woCompare( keyRight, sortPattern ) < 0;
- }
- };
-
- } // namespace
-
- class PushSortSortMixed {
- public:
- void run() {
- BSONObj objs[3];
- objs[0] = fromjson( "{a:1, b:1}" );
- objs[1] = fromjson( "{a:3, b:1}" );
- objs[2] = fromjson( "{a:2, b:3}" );
-
- vector<BSONObj> workArea;
- for ( int i = 0; i < 3; i++ ) {
- workArea.push_back( objs[i] );
- }
-
- sort( workArea.begin(), workArea.end(), ProjectKeyCmp( BSON("b" << 1 << "a" << -1) ) );
-
- ASSERT_EQUALS( workArea[0], objs[1] );
- ASSERT_EQUALS( workArea[1], objs[0] );
- ASSERT_EQUALS( workArea[2], objs[2] );
- }
- };
-
- class PushSortSortOutOfOrderFields {
- public:
- void run() {
- BSONObj objs[3];
- objs[0] = fromjson( "{b:1, a:1}" );
- objs[1] = fromjson( "{a:3, b:2}" );
- objs[2] = fromjson( "{b:3, a:2}" );
-
- vector<BSONObj> workArea;
- for ( int i = 0; i < 3; i++ ) {
- workArea.push_back( objs[i] );
- }
-
- sort( workArea.begin(), workArea.end(), ProjectKeyCmp( BSON("a" << 1 << "b" << 1) ) );
-
- ASSERT_EQUALS( workArea[0], objs[0] );
- ASSERT_EQUALS( workArea[1], objs[2] );
- ASSERT_EQUALS( workArea[2], objs[1] );
- }
- };
-
- class PushSortSortExtraFields {
- public:
- void run() {
- BSONObj objs[3];
- objs[0] = fromjson( "{b:1, c:2, a:1}" );
- objs[1] = fromjson( "{c:1, a:3, b:2}" );
- objs[2] = fromjson( "{b:3, a:2}" );
-
- vector<BSONObj> workArea;
- for ( int i = 0; i < 3; i++ ) {
- workArea.push_back( objs[i] );
- }
-
- sort( workArea.begin(), workArea.end(), ProjectKeyCmp( BSON("a" << 1 << "b" << 1) ) );
-
- ASSERT_EQUALS( workArea[0], objs[0] );
- ASSERT_EQUALS( workArea[1], objs[2] );
- ASSERT_EQUALS( workArea[2], objs[1] );
- }
- };
-
- class PushSortSortMissingFields {
- public:
- void run() {
- BSONObj objs[3];
- objs[0] = fromjson( "{a:2, b:2}" );
- objs[1] = fromjson( "{a:1}" );
- objs[2] = fromjson( "{a:3, b:3, c:3}" );
-
- vector<BSONObj> workArea;
- for ( int i = 0; i < 3; i++ ) {
- workArea.push_back( objs[i] );
}
-
- sort( workArea.begin(), workArea.end(), ProjectKeyCmp( BSON("b" << 1 << "c" << 1) ) );
-
- ASSERT_EQUALS( workArea[0], objs[1] );
- ASSERT_EQUALS( workArea[1], objs[0] );
- ASSERT_EQUALS( workArea[2], objs[2] );
- }
- };
-
- class PushSortSortNestedFields {
- public:
- void run() {
- BSONObj objs[3];
- objs[0] = fromjson( "{a:{b:{c:2, d:0}}}" );
- objs[1] = fromjson( "{a:{b:{c:1, d:2}}}" );
- objs[2] = fromjson( "{a:{b:{c:3, d:1}}}" );
-
- vector<BSONObj> workArea;
- for ( int i = 0; i < 3; i++ ) {
- workArea.push_back( objs[i] );
- }
-
- sort( workArea.begin(), workArea.end(), ProjectKeyCmp( fromjson( "{'a.b.d':-1}" ) ) );
-
- ASSERT_EQUALS( workArea[0], objs[1] );
- ASSERT_EQUALS( workArea[1], objs[2] );
- ASSERT_EQUALS( workArea[2], objs[0] );
-
- sort( workArea.begin(), workArea.end(), ProjectKeyCmp( fromjson( "{'a.b':1}" ) ) );
-
- ASSERT_EQUALS( workArea[0], objs[1] );
- ASSERT_EQUALS( workArea[1], objs[0] );
- ASSERT_EQUALS( workArea[2], objs[2] );
-
- }
- };
-
- class PushSortInvalidSortPattern : public SetBase {
- public:
- void run() {
- // Sort pattern validation is made during update command checking. Therefore, to
- // catch bad patterns, we have to write updated that use them.
-
- BSONObj expected = fromjson( "{'_id':0,x:[{a:1}, {a:2}]}" );
- _client.insert( ns(), expected );
-
- // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a..d:1} } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2 <<
- "$sort" << BSON( "a..d" << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
-
-
- // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a.:1} } } }
- pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2 <<
- "$sort" << BSON( "a." << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
-
- // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.b:1} } } }
- pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2 <<
- "$sort" << BSON( ".b" << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
-
- // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.:1} } } }
- pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2 <<
- "$sort" << BSON( "." << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
-
- // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {'':1} } } }
- pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2 <<
- "$sort" << BSON( "" << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
- }
- };
-
- class PushSortInvalidEachType : public SetBase {
- public:
- void run() {
- BSONObj expected = fromjson( "{'_id':0,x:[{a:1},{a:2}]}" );
- _client.insert( ns(), expected );
- // { $push : { x : { $each : [ 3 ], $slice:-2, $sort : {a:1} } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( 3 ) <<
- "$slice" << -2 <<
- "$sort" << BSON( "a" << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
- }
- };
-
- class PushSortInvalidSortType : public SetBase {
- public:
- void run() {
- BSONObj expected = fromjson( "{'_id':0,x:[{a:1},{a:2}]}" );
- _client.insert( ns(), expected );
- // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : 2} } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2 <<
- "$sort" << 2 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
- }
- };
-
- class PushSortInvalidSortValue : public SetBase {
- public:
- void run() {
- BSONObj expected = fromjson( "{'_id':0,x:[{a:1},{a:2}]}" );
- _client.insert( ns(), expected );
- // { $push : { x : { $each : [ {a:3} ], $slice:2, $sort : {a:1} } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << 2 <<
- "$sort" << BSON( "a" << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
- }
- };
-
- class PushSortInvalidSortDouble : public SetBase {
- public:
- void run() {
- BSONObj expected = fromjson( "{'_id':0,x:[{a:1},{a:2}]}" );
- _client.insert( ns(), expected );
- // { $push : { x : { $each : [ {a:3} ], $slice:-2.1, $sort : {a:1} } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2.1 <<
- "$sort" << BSON( "a" << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
- }
- };
-
- class PushSortValidSortDouble : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,x:[{a:1},{a:2}]}" ) );
- // { $push : { x : { $each : [ {a:3} ], $slice:-2.0, $sort : {a:1} } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2.0 <<
- "$sort" << BSON( "a" << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj expected = fromjson( "{'_id':0,x:[{a:2},{a:3}]}" );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
- }
- };
-
- class PushSortInvalidSortSort : public SetBase {
- public:
- void run() {
- BSONObj expected = fromjson( "{'_id':0,x:[{a:1},{a:2}]}" );
- _client.insert( ns(), expected );
- // { $push : { x : { $each : [ {a:3} ], $slice:-2.0, $sort : [2, 1] } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2.0 <<
- "$sort" << BSON_ARRAY( 2 << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
- }
- };
-
- class PushSortInvalidSortSortOrder : public SetBase {
- public:
- void run() {
- BSONObj expected = fromjson( "{'_id':0,x:[{a:1},{a:2}]}" );
- _client.insert( ns(), expected );
- // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a:10} } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 3 ) ) <<
- "$slice" << -2 <<
- "$sort" << BSON( "a" << 10 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
- }
- };
-
- class PushSortInvertedSortAndSlice : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,x:[{a:1},{a:3}]}" ) );
- // { $push : { x : { $each : [ {a:2} ], $sort: {a:1}, $slice:-2 } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 2 ) ) <<
- "$sort" << BSON( "a" << 1 ) <<
- "$slice" << -2.0 );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj expected = fromjson( "{'_id':0,x:[{a:2},{a:3}]}" );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
-
- }
- };
-
- class PushSortInvalidDuplicatedSort : public SetBase {
- public:
- void run() {
- BSONObj expected = fromjson( "{'_id':0,x:[{a:1},{a:3}]}" );
- _client.insert( ns(), expected );
- // { $push : { x : { $each : [ {a:2} ], $sort : {a:1}, $sort: {a:1} } } }
- BSONObj pushObj = BSON( "$each" << BSON_ARRAY( BSON( "a" << 2 ) ) <<
- "$sort" << BSON( "a" << 1 ) <<
- "$sort" << BSON( "a" << 1 ) );
- _client.update( ns(), Query(), BSON( "$push" << BSON( "x" << pushObj ) ) );
- BSONObj result = _client.findOne( ns(), Query() );
- ASSERT_EQUALS( result, expected );
-
- }
- };
-
- class CantIncParent : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:{b:4}}" ) );
- _client.update( ns(), Query(), BSON( "$inc" << BSON( "a" << 4.0 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:4}}" ) ) == 0 );
- }
- };
-
- class DontDropEmpty : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:{b:{}}}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a.c" << 4.0 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:{},c:4}}" ) ) == 0 );
- }
- };
-
- class InsertInEmpty : public SetBase {
- public:
- void run() {
- _client.insert( ns(), fromjson( "{'_id':0,a:{b:{}}}" ) );
- _client.update( ns(), Query(), BSON( "$set" << BSON( "a.b.f" << 4.0 ) ) );
- ASSERT( _client.findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:{f:4}}}" ) ) == 0 );
- }
- };
-
- class IndexParentOfMod : public SetBase {
- public:
- void run() {
- ASSERT_OK(dbtests::createIndex( &_txn, ns(), BSON( "a" << 1 ) ));
- _client.insert( ns(), fromjson( "{'_id':0}" ) );
- _client.update( ns(), Query(), fromjson( "{$set:{'a.b':4}}" ) );
- ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , _client.findOne( ns(), Query() ) );
- ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , _client.findOne( ns(), fromjson( "{'a.b':4}" ) ) ); // make sure the index works
}
- };
-
- class PreserveIdWithIndex : public SetBase { // Not using $set, but base class is still useful
- public:
- void run() {
- _client.insert( ns(), BSON( "_id" << 55 << "i" << 5 ) );
- _client.update( ns(), BSON( "i" << 5 ), BSON( "i" << 6 ) );
- ASSERT( !_client.findOne( ns(), Query( BSON( "_id" << 55 ) ).hint ( "{\"_id\":1}" ) ).isEmpty() );
- }
- };
-
- class CheckNoMods : public SetBase {
- public:
- void run() {
- _client.update( ns(), BSONObj(), BSON( "i" << 5 << "$set" << BSON( "q" << 3 ) ), true );
- ASSERT( error() );
- }
- };
-
- class UpdateMissingToNull : public SetBase {
- public:
- void run() {
- _client.insert( ns(), BSON( "a" << 5 ) );
- _client.update( ns(), BSON( "a" << 5 ), fromjson( "{$set:{b:null}}" ) );
- ASSERT_EQUALS( jstNULL, _client.findOne( ns(), QUERY( "a" << 5 ) ).getField( "b" ).type() );
- }
- };
-
- /** SERVER-4777 */
- class TwoModsWithinDuplicatedField : public SetBase {
- public:
- void run() {
- _client.insert( ns(), BSON( "_id" << 0 << "a" << 1
- << "x" << BSONObj() << "x" << BSONObj()
- << "z" << 5 ) );
- _client.update( ns(), BSONObj(), BSON( "$set" << BSON( "x.b" << 1 << "x.c" << 1 ) ) );
- ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1
- << "x" << BSON( "b" << 1 << "c" << 1 ) << "x" << BSONObj()
- << "z" << 5 ),
- _client.findOne( ns(), BSONObj() ) );
- }
- };
-
- /** SERVER-4777 */
- class ThreeModsWithinDuplicatedField : public SetBase {
- public:
- void run() {
- _client.insert( ns(),
- BSON( "_id" << 0
- << "x" << BSONObj() << "x" << BSONObj() << "x" << BSONObj() ) );
- _client.update( ns(), BSONObj(),
- BSON( "$set" << BSON( "x.b" << 1 << "x.c" << 1 << "x.d" << 1 ) ) );
- ASSERT_EQUALS( BSON( "_id" << 0
- << "x" << BSON( "b" << 1 << "c" << 1 << "d" << 1 )
- << "x" << BSONObj() << "x" << BSONObj() ),
- _client.findOne( ns(), BSONObj() ) );
- }
- };
-
- class TwoModsBeforeExistingField : public SetBase {
- public:
- void run() {
- _client.insert( ns(), BSON( "_id" << 0 << "x" << 5 ) );
- _client.update( ns(), BSONObj(),
- BSON( "$set" << BSON( "a" << 1 << "b" << 1 << "x" << 10 ) ) );
- ASSERT_EQUALS(
- mutablebson::unordered( BSON( "_id" << 0 << "a" << 1 << "b" << 1 << "x" << 10 ) ),
- mutablebson::unordered( _client.findOne( ns(), BSONObj() ) ) );
- }
- };
-
- namespace basic {
- class Base : public ClientBase {
- protected:
-
- virtual const char * ns() = 0;
- virtual void dotest() = 0;
-
- void insert( const BSONObj& o ) {
- _client.insert( ns() , o );
- }
-
- void update( const BSONObj& m ) {
- _client.update( ns() , BSONObj() , m );
- }
-
- BSONObj findOne() {
- return _client.findOne( ns() , BSONObj() );
- }
-
- void test( const char* initial , const char* mod , const char* after ) {
- test( fromjson( initial ) , fromjson( mod ) , fromjson( after ) );
- }
-
-
- void test( const BSONObj& initial , const BSONObj& mod , const BSONObj& after ) {
- _client.dropCollection( ns() );
- insert( initial );
- update( mod );
- ASSERT_EQUALS( after , findOne() );
- _client.dropCollection( ns() );
- }
-
- public:
-
- Base() {}
- virtual ~Base() {
- }
-
- void run() {
- _client.dropCollection( ns() );
-
- dotest();
-
- _client.dropCollection( ns() );
- }
- };
-
- class SingleTest : public Base {
- virtual BSONObj initial() = 0;
- virtual BSONObj mod() = 0;
- virtual BSONObj after() = 0;
+ }
+};
- void dotest() {
- test( initial() , mod() , after() );
- }
-
- };
-
- class inc1 : public SingleTest {
- virtual BSONObj initial() {
- return BSON( "_id" << 1 << "x" << 1 );
- }
- virtual BSONObj mod() {
- return BSON( "$inc" << BSON( "x" << 2 ) );
- }
- virtual BSONObj after() {
- return BSON( "_id" << 1 << "x" << 3 );
- }
- virtual const char * ns() {
- return "unittests.inc1";
- }
-
- };
-
- class inc2 : public SingleTest {
- virtual BSONObj initial() {
- return BSON( "_id" << 1 << "x" << 1 );
- }
- virtual BSONObj mod() {
- return BSON( "$inc" << BSON( "x" << 2.5 ) );
- }
- virtual BSONObj after() {
- return BSON( "_id" << 1 << "x" << 3.5 );
- }
- virtual const char * ns() {
- return "unittests.inc2";
- }
-
- };
-
- class inc3 : public SingleTest {
- virtual BSONObj initial() {
- return BSON( "_id" << 1 << "x" << 537142123123LL );
- }
- virtual BSONObj mod() {
- return BSON( "$inc" << BSON( "x" << 2 ) );
- }
- virtual BSONObj after() {
- return BSON( "_id" << 1 << "x" << 537142123125LL );
- }
- virtual const char * ns() {
- return "unittests.inc3";
- }
+namespace {
- };
-
- class inc4 : public SingleTest {
- virtual BSONObj initial() {
- return BSON( "_id" << 1 << "x" << 537142123123LL );
- }
- virtual BSONObj mod() {
- return BSON( "$inc" << BSON( "x" << 2LL ) );
- }
- virtual BSONObj after() {
- return BSON( "_id" << 1 << "x" << 537142123125LL );
- }
- virtual const char * ns() {
- return "unittests.inc4";
- }
-
- };
-
- class inc5 : public SingleTest {
- virtual BSONObj initial() {
- return BSON( "_id" << 1 << "x" << 537142123123LL );
- }
- virtual BSONObj mod() {
- return BSON( "$inc" << BSON( "x" << 2.0 ) );
- }
- virtual BSONObj after() {
- return BSON( "_id" << 1 << "x" << 537142123125LL );
- }
- virtual const char * ns() {
- return "unittests.inc5";
- }
-
- };
-
- class inc6 : public Base {
-
- virtual const char * ns() {
- return "unittests.inc6";
- }
-
-
- virtual BSONObj initial() { return BSONObj(); }
- virtual BSONObj mod() { return BSONObj(); }
- virtual BSONObj after() { return BSONObj(); }
-
- void dotest() {
- long long start = numeric_limits<int>::max() - 5;
- long long max = numeric_limits<int>::max() + 5ll;
-
- _client.insert( ns() , BSON( "x" << (int)start ) );
- ASSERT( findOne()["x"].type() == NumberInt );
-
- while ( start < max ) {
- update( BSON( "$inc" << BSON( "x" << 1 ) ) );
- start += 1;
- ASSERT_EQUALS( start , findOne()["x"].numberLong() ); // SERVER-2005
- }
-
- ASSERT( findOne()["x"].type() == NumberLong );
- }
- };
-
- class bit1 : public Base {
- const char * ns() {
- return "unittests.bit1";
- }
- void dotest() {
- test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "and" << 2 ) ) ) , BSON( "_id" << 1 << "x" << ( 3 & 2 ) ) );
- test( BSON( "_id" << 1 << "x" << 1 ) , BSON( "$bit" << BSON( "x" << BSON( "or" << 4 ) ) ) , BSON( "_id" << 1 << "x" << ( 1 | 4 ) ) );
- test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "and" << 2 << "or" << 8 ) ) ) , BSON( "_id" << 1 << "x" << ( ( 3 & 2 ) | 8 ) ) );
- test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "or" << 2 << "and" << 8 ) ) ) , BSON( "_id" << 1 << "x" << ( ( 3 | 2 ) & 8 ) ) );
-
- }
- };
-
- class unset : public Base {
- const char * ns() {
- return "unittests.unset";
- }
- void dotest() {
- test( "{_id:1,x:1}" , "{$unset:{x:1}}" , "{_id:1}" );
- }
- };
-
- class setswitchint : public Base {
- const char * ns() {
- return "unittests.int1";
- }
- void dotest() {
- test( BSON( "_id" << 1 << "x" << 1 ) , BSON( "$set" << BSON( "x" << 5.6 ) ) , BSON( "_id" << 1 << "x" << 5.6 ) );
- test( BSON( "_id" << 1 << "x" << 5.6 ) , BSON( "$set" << BSON( "x" << 1 ) ) , BSON( "_id" << 1 << "x" << 1 ) );
- }
- };
-
-
- };
+/**
+ * Comparator between two BSONObj objects that takes into consideration only the keys
+ * and direction described in the sort pattern.
+ *
+ * TODO: This was pulled from update_internal.h; we should verify that these tests work
+ * with the new update framework's $push sorter.
+ */
+struct ProjectKeyCmp {
+ BSONObj sortPattern;
+
+ ProjectKeyCmp(BSONObj pattern) : sortPattern(pattern) {}
+
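+ // Provides a strict weak ordering for std::sort: extract only the fields named in
+ // the sort pattern from each side, then compare those keys in the pattern's order.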
+ int operator()(const BSONObj& left, const BSONObj& right) const {
+ BSONObj keyLeft = left.extractFields(sortPattern, true);
+ BSONObj keyRight = right.extractFields(sortPattern, true);
+ return keyLeft.woCompare(keyRight, sortPattern) < 0;
+ }
+};
+
+} // namespace
+
+class PushSortSortMixed {
+public:
+ void run() {
+ BSONObj objs[3];
+ objs[0] = fromjson("{a:1, b:1}");
+ objs[1] = fromjson("{a:3, b:1}");
+ objs[2] = fromjson("{a:2, b:3}");
- class All : public Suite {
- public:
- All() : Suite( "update" ) {
- }
- void setupTests() {
- add< ModId >();
- add< ModNonmodMix >();
- add< InvalidMod >();
- add< ModNotFirst >();
- add< ModDuplicateFieldSpec >();
- add< IncNonNumber >();
- add< PushAllNonArray >();
- add< PullAllNonArray >();
- add< IncTargetNonNumber >();
- add< SetNum >();
- add< SetString >();
- add< SetStringDifferentLength >();
- add< SetStringToNum >();
- add< SetStringToNumInPlace >();
- add< SetOnInsertFromEmpty >();
- add< SetOnInsertFromNonExistent >();
- add< SetOnInsertFromNonExistentWithQuery >();
- add< SetOnInsertFromNonExistentWithQueryOverField >();
- add< SetOnInsertMissingField >();
- add< SetOnInsertExisting >();
- add< SetOnInsertMixed >();
- add< SetOnInsertMissingParent >();
- add< ModDotted >();
- add< SetInPlaceDotted >();
- add< SetRecreateDotted >();
- add< SetMissingDotted >();
- add< SetAdjacentDotted >();
- add< IncMissing >();
- add< MultiInc >();
- add< UnorderedNewSet >();
- add< UnorderedNewSetAdjacent >();
- add< ArrayEmbeddedSet >();
- add< AttemptEmbedInExistingNum >();
- add< AttemptEmbedConflictsWithOtherSet >();
- add< ModMasksEmbeddedConflict >();
- add< ModOverwritesExistingObject >();
- add< InvalidEmbeddedSet >();
- add< UpsertMissingEmbedded >();
- add< Push >();
- add< PushInvalidEltType >();
- add< PushConflictsWithOtherMod >();
- add< PushFromNothing >();
- add< PushFromEmpty >();
- add< PushInsideNothing >();
- add< CantPushInsideOtherMod >();
- add< CantPushTwice >();
- add< SetEncapsulationConflictsWithExistingType >();
- add< CantPushToParent >();
- add< PushEachSimple >();
- add< PushEachFromEmpty >();
- add< PushSliceBelowFull >();
- add< PushSliceReachedFullExact >();
- add< PushSliceReachedFullWithEach >();
- add< PushSliceReachedFullWithBoth >();
- add< PushSliceToZero >();
- add< PushSliceToZeroFromNothing >();
- add< PushSliceFromNothing >();
- add< PushSliceLongerThanSliceFromNothing >();
- add< PushSliceFromEmpty >();
- add< PushSliceLongerThanSliceFromEmpty >();
- add< PushSliceTwoFields >();
- add< PushSliceAndNormal >();
- add< PushSliceTwoFieldsConflict >();
- add< PushSliceAndNormalConflict >();
- add< PushSliceInvalidEachType >();
- add< PushSliceInvalidSliceType >();
- add< PushSliceInvalidSliceValue >();
- add< PushSliceInvalidSliceDouble >();
- add< PushSliceValidSliceDouble >();
- add< PushSliceInvalidSlice >();
- add< PushSortBelowFull >();
- add< PushSortReachedFullExact >();
- add< PushSortReachedFullWithBoth >();
- add< PushSortToZero >();
- add< PushSortToZeroFromNothing >();
- add< PushSortFromNothing >();
- add< PushSortLongerThanSliceFromNothing >();
- add< PushSortFromEmpty >();
- add< PushSortLongerThanSliceFromEmpty >();
- add< PushSortSortMixed >();
- add< PushSortSortOutOfOrderFields >();
- add< PushSortSortExtraFields >();
- add< PushSortSortMissingFields >();
- add< PushSortSortNestedFields >();
- add< PushSortInvalidSortPattern >();
- add< PushSortInvalidEachType >();
- add< PushSortInvalidSortType >();
- add< PushSortInvalidSortValue >();
- add< PushSortInvalidSortDouble >();
- add< PushSortValidSortDouble >();
- add< PushSortInvalidSortSort >();
- add< PushSortInvalidSortSortOrder >();
- add< PushSortInvertedSortAndSlice >();
- add< PushSortInvalidDuplicatedSort >();
- add< CantIncParent >();
- add< DontDropEmpty >();
- add< InsertInEmpty >();
- add< IndexParentOfMod >();
- add< PreserveIdWithIndex >();
- add< CheckNoMods >();
- add< UpdateMissingToNull >();
- add< TwoModsWithinDuplicatedField >();
- add< ThreeModsWithinDuplicatedField >();
- add< TwoModsBeforeExistingField >();
- add< basic::inc1 >();
- add< basic::inc2 >();
- add< basic::inc3 >();
- add< basic::inc4 >();
- add< basic::inc5 >();
- add< basic::inc6 >();
- add< basic::bit1 >();
- add< basic::unset >();
- add< basic::setswitchint >();
+ vector<BSONObj> workArea;
+ for (int i = 0; i < 3; i++) {
+ workArea.push_back(objs[i]);
}
- };
- SuiteInstance<All> myall;
+ sort(workArea.begin(), workArea.end(), ProjectKeyCmp(BSON("b" << 1 << "a" << -1)));
+
+ ASSERT_EQUALS(workArea[0], objs[1]);
+ ASSERT_EQUALS(workArea[1], objs[0]);
+ ASSERT_EQUALS(workArea[2], objs[2]);
+ }
+};
-} // namespace UpdateTests
+class PushSortSortOutOfOrderFields {
+public:
+ void run() {
+ BSONObj objs[3];
+ objs[0] = fromjson("{b:1, a:1}");
+ objs[1] = fromjson("{a:3, b:2}");
+ objs[2] = fromjson("{b:3, a:2}");
+ vector<BSONObj> workArea;
+ for (int i = 0; i < 3; i++) {
+ workArea.push_back(objs[i]);
+ }
+
+ sort(workArea.begin(), workArea.end(), ProjectKeyCmp(BSON("a" << 1 << "b" << 1)));
+
+ ASSERT_EQUALS(workArea[0], objs[0]);
+ ASSERT_EQUALS(workArea[1], objs[2]);
+ ASSERT_EQUALS(workArea[2], objs[1]);
+ }
+};
+
+class PushSortSortExtraFields {
+public:
+ void run() {
+ BSONObj objs[3];
+ objs[0] = fromjson("{b:1, c:2, a:1}");
+ objs[1] = fromjson("{c:1, a:3, b:2}");
+ objs[2] = fromjson("{b:3, a:2}");
+
+ vector<BSONObj> workArea;
+ for (int i = 0; i < 3; i++) {
+ workArea.push_back(objs[i]);
+ }
+
+ sort(workArea.begin(), workArea.end(), ProjectKeyCmp(BSON("a" << 1 << "b" << 1)));
+
+ ASSERT_EQUALS(workArea[0], objs[0]);
+ ASSERT_EQUALS(workArea[1], objs[2]);
+ ASSERT_EQUALS(workArea[2], objs[1]);
+ }
+};
+
+class PushSortSortMissingFields {
+public:
+ void run() {
+ BSONObj objs[3];
+ objs[0] = fromjson("{a:2, b:2}");
+ objs[1] = fromjson("{a:1}");
+ objs[2] = fromjson("{a:3, b:3, c:3}");
+
+ vector<BSONObj> workArea;
+ for (int i = 0; i < 3; i++) {
+ workArea.push_back(objs[i]);
+ }
+
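+ // objs[1] has no 'b'; the expected order relies on a missing sort field comparing lowest.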
+ sort(workArea.begin(), workArea.end(), ProjectKeyCmp(BSON("b" << 1 << "c" << 1)));
+
+ ASSERT_EQUALS(workArea[0], objs[1]);
+ ASSERT_EQUALS(workArea[1], objs[0]);
+ ASSERT_EQUALS(workArea[2], objs[2]);
+ }
+};
+
+class PushSortSortNestedFields {
+public:
+ void run() {
+ BSONObj objs[3];
+ objs[0] = fromjson("{a:{b:{c:2, d:0}}}");
+ objs[1] = fromjson("{a:{b:{c:1, d:2}}}");
+ objs[2] = fromjson("{a:{b:{c:3, d:1}}}");
+
+ vector<BSONObj> workArea;
+ for (int i = 0; i < 3; i++) {
+ workArea.push_back(objs[i]);
+ }
+
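+ // Dotted sort keys ('a.b.d') compare the nested value; sorting on 'a.b' compares the
+ // whole embedded subobject field by field.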
+ sort(workArea.begin(), workArea.end(), ProjectKeyCmp(fromjson("{'a.b.d':-1}")));
+
+ ASSERT_EQUALS(workArea[0], objs[1]);
+ ASSERT_EQUALS(workArea[1], objs[2]);
+ ASSERT_EQUALS(workArea[2], objs[0]);
+
+ sort(workArea.begin(), workArea.end(), ProjectKeyCmp(fromjson("{'a.b':1}")));
+
+ ASSERT_EQUALS(workArea[0], objs[1]);
+ ASSERT_EQUALS(workArea[1], objs[0]);
+ ASSERT_EQUALS(workArea[2], objs[2]);
+ }
+};
+
+class PushSortInvalidSortPattern : public SetBase {
+public:
+ void run() {
+ // Sort pattern validation is performed during update command checking. Therefore, to
+ // catch bad patterns, we have to write updates that use them.
+
+ BSONObj expected = fromjson("{'_id':0,x:[{a:1}, {a:2}]}");
+ _client.insert(ns(), expected);
+
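+ // Each malformed $sort pattern below should be rejected, leaving the document unchanged.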
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a..d:1} } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
+ << BSON("a..d" << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+
+
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a.:1} } } }
+ pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
+ << BSON("a." << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.b:1} } } }
+ pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
+ << BSON(".b" << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.:1} } } }
+ pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
+ << BSON("." << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {'':1} } } }
+ pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
+ << BSON("" << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class PushSortInvalidEachType : public SetBase {
+public:
+ void run() {
+ BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:2}]}");
+ _client.insert(ns(), expected);
+ // { $push : { x : { $each : [ 3 ], $slice:-2, $sort : {a:1} } } }
+ BSONObj pushObj =
+ BSON("$each" << BSON_ARRAY(3) << "$slice" << -2 << "$sort" << BSON("a" << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class PushSortInvalidSortType : public SetBase {
+public:
+ void run() {
+ BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:2}]}");
+ _client.insert(ns(), expected);
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : 2} } }
+ BSONObj pushObj =
+ BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << 2);
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class PushSortInvalidSortValue : public SetBase {
+public:
+ void run() {
+ BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:2}]}");
+ _client.insert(ns(), expected);
+ // { $push : { x : { $each : [ {a:3} ], $slice:2, $sort : {a:1} } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << 2 << "$sort"
+ << BSON("a" << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class PushSortInvalidSortDouble : public SetBase {
+public:
+ void run() {
+ BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:2}]}");
+ _client.insert(ns(), expected);
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2.1, $sort : {a:1} } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2.1 << "$sort"
+ << BSON("a" << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class PushSortValidSortDouble : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,x:[{a:1},{a:2}]}"));
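+ // Unlike the -2.1 case above, a whole-number double (-2.0) is an acceptable $slice value.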
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2.0, $sort : {a:1} } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2.0 << "$sort"
+ << BSON("a" << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class PushSortInvalidSortSort : public SetBase {
+public:
+ void run() {
+ BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:2}]}");
+ _client.insert(ns(), expected);
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2.0, $sort : [2, 1] } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2.0 << "$sort"
+ << BSON_ARRAY(2 << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class PushSortInvalidSortSortOrder : public SetBase {
+public:
+ void run() {
+ BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:2}]}");
+ _client.insert(ns(), expected);
+ // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a:10} } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
+ << BSON("a" << 10));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class PushSortInvertedSortAndSlice : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,x:[{a:1},{a:3}]}"));
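+ // $sort listed before $slice should still be accepted and applied.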
+ // { $push : { x : { $each : [ {a:2} ], $sort: {a:1}, $slice:-2.0 } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
+ << "$slice" << -2.0);
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class PushSortInvalidDuplicatedSort : public SetBase {
+public:
+ void run() {
+ BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:3}]}");
+ _client.insert(ns(), expected);
+ // { $push : { x : { $each : [ {a:2} ], $sort : {a:1}, $sort: {a:1} } } }
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
+ << "$sort" << BSON("a" << 1));
+ _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
+ BSONObj result = _client.findOne(ns(), Query());
+ ASSERT_EQUALS(result, expected);
+ }
+};
+
+class CantIncParent : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:{b:4}}"));
+ _client.update(ns(), Query(), BSON("$inc" << BSON("a" << 4.0)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:4}}")) == 0);
+ }
+};
+
+class DontDropEmpty : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:{b:{}}}"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a.c" << 4.0)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:{},c:4}}")) == 0);
+ }
+};
+
+class InsertInEmpty : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), fromjson("{'_id':0,a:{b:{}}}"));
+ _client.update(ns(), Query(), BSON("$set" << BSON("a.b.f" << 4.0)));
+ ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:{f:4}}}")) == 0);
+ }
+};
+
+class IndexParentOfMod : public SetBase {
+public:
+ void run() {
+ ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1)));
+ _client.insert(ns(), fromjson("{'_id':0}"));
+ _client.update(ns(), Query(), fromjson("{$set:{'a.b':4}}"));
+ ASSERT_EQUALS(fromjson("{'_id':0,a:{b:4}}"), _client.findOne(ns(), Query()));
+ ASSERT_EQUALS(fromjson("{'_id':0,a:{b:4}}"),
+ _client.findOne(ns(), fromjson("{'a.b':4}"))); // make sure the index works
+ }
+};
+
+class PreserveIdWithIndex : public SetBase { // Not using $set, but base class is still useful
+public:
+ void run() {
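+ // Replacing the matched document wholesale should preserve its original _id.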
+ _client.insert(ns(), BSON("_id" << 55 << "i" << 5));
+ _client.update(ns(), BSON("i" << 5), BSON("i" << 6));
+ ASSERT(!_client.findOne(ns(), Query(BSON("_id" << 55)).hint("{\"_id\":1}")).isEmpty());
+ }
+};
+
+class CheckNoMods : public SetBase {
+public:
+ void run() {
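+ // An update document mixing a plain field ('i') with a modifier ('$set') should fail.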
+ _client.update(ns(), BSONObj(), BSON("i" << 5 << "$set" << BSON("q" << 3)), true);
+ ASSERT(error());
+ }
+};
+
+class UpdateMissingToNull : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), BSON("a" << 5));
+ _client.update(ns(), BSON("a" << 5), fromjson("{$set:{b:null}}"));
+ ASSERT_EQUALS(jstNULL, _client.findOne(ns(), QUERY("a" << 5)).getField("b").type());
+ }
+};
+
+/** SERVER-4777 */
+class TwoModsWithinDuplicatedField : public SetBase {
+public:
+ void run() {
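+ // Only the first of the duplicated 'x' fields should receive the $set mods.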
+ _client.insert(
+ ns(), BSON("_id" << 0 << "a" << 1 << "x" << BSONObj() << "x" << BSONObj() << "z" << 5));
+ _client.update(ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1)));
+ ASSERT_EQUALS(BSON("_id" << 0 << "a" << 1 << "x" << BSON("b" << 1 << "c" << 1) << "x"
+ << BSONObj() << "z" << 5),
+ _client.findOne(ns(), BSONObj()));
+ }
+};
+
+/** SERVER-4777 */
+class ThreeModsWithinDuplicatedField : public SetBase {
+public:
+ void run() {
+ _client.insert(
+ ns(), BSON("_id" << 0 << "x" << BSONObj() << "x" << BSONObj() << "x" << BSONObj()));
+ _client.update(
+ ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1 << "x.d" << 1)));
+ ASSERT_EQUALS(BSON("_id" << 0 << "x" << BSON("b" << 1 << "c" << 1 << "d" << 1) << "x"
+ << BSONObj() << "x" << BSONObj()),
+ _client.findOne(ns(), BSONObj()));
+ }
+};
+
+class TwoModsBeforeExistingField : public SetBase {
+public:
+ void run() {
+ _client.insert(ns(), BSON("_id" << 0 << "x" << 5));
+ _client.update(ns(), BSONObj(), BSON("$set" << BSON("a" << 1 << "b" << 1 << "x" << 10)));
+ ASSERT_EQUALS(mutablebson::unordered(BSON("_id" << 0 << "a" << 1 << "b" << 1 << "x" << 10)),
+ mutablebson::unordered(_client.findOne(ns(), BSONObj())));
+ }
+};
+
+namespace basic {
+class Base : public ClientBase {
+protected:
+ virtual const char* ns() = 0;
+ virtual void dotest() = 0;
+
+ void insert(const BSONObj& o) {
+ _client.insert(ns(), o);
+ }
+
+ void update(const BSONObj& m) {
+ _client.update(ns(), BSONObj(), m);
+ }
+
+ BSONObj findOne() {
+ return _client.findOne(ns(), BSONObj());
+ }
+
+ void test(const char* initial, const char* mod, const char* after) {
+ test(fromjson(initial), fromjson(mod), fromjson(after));
+ }
+
+
+ void test(const BSONObj& initial, const BSONObj& mod, const BSONObj& after) {
+ _client.dropCollection(ns());
+ insert(initial);
+ update(mod);
+ ASSERT_EQUALS(after, findOne());
+ _client.dropCollection(ns());
+ }
+
+public:
+ Base() {}
+ virtual ~Base() {}
+
+ void run() {
+ _client.dropCollection(ns());
+
+ dotest();
+
+ _client.dropCollection(ns());
+ }
+};
+
+class SingleTest : public Base {
+ virtual BSONObj initial() = 0;
+ virtual BSONObj mod() = 0;
+ virtual BSONObj after() = 0;
+
+ void dotest() {
+ test(initial(), mod(), after());
+ }
+};
+
+class inc1 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON("_id" << 1 << "x" << 1);
+ }
+ virtual BSONObj mod() {
+ return BSON("$inc" << BSON("x" << 2));
+ }
+ virtual BSONObj after() {
+ return BSON("_id" << 1 << "x" << 3);
+ }
+ virtual const char* ns() {
+ return "unittests.inc1";
+ }
+};
+
+class inc2 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON("_id" << 1 << "x" << 1);
+ }
+ virtual BSONObj mod() {
+ return BSON("$inc" << BSON("x" << 2.5));
+ }
+ virtual BSONObj after() {
+ return BSON("_id" << 1 << "x" << 3.5);
+ }
+ virtual const char* ns() {
+ return "unittests.inc2";
+ }
+};
+
+class inc3 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON("_id" << 1 << "x" << 537142123123LL);
+ }
+ virtual BSONObj mod() {
+ return BSON("$inc" << BSON("x" << 2));
+ }
+ virtual BSONObj after() {
+ return BSON("_id" << 1 << "x" << 537142123125LL);
+ }
+ virtual const char* ns() {
+ return "unittests.inc3";
+ }
+};
+
+class inc4 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON("_id" << 1 << "x" << 537142123123LL);
+ }
+ virtual BSONObj mod() {
+ return BSON("$inc" << BSON("x" << 2LL));
+ }
+ virtual BSONObj after() {
+ return BSON("_id" << 1 << "x" << 537142123125LL);
+ }
+ virtual const char* ns() {
+ return "unittests.inc4";
+ }
+};
+
+class inc5 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON("_id" << 1 << "x" << 537142123123LL);
+ }
+ virtual BSONObj mod() {
+ return BSON("$inc" << BSON("x" << 2.0));
+ }
+ virtual BSONObj after() {
+ return BSON("_id" << 1 << "x" << 537142123125LL);
+ }
+ virtual const char* ns() {
+ return "unittests.inc5";
+ }
+};
+
+class inc6 : public Base {
+ virtual const char* ns() {
+ return "unittests.inc6";
+ }
+
+
+ virtual BSONObj initial() {
+ return BSONObj();
+ }
+ virtual BSONObj mod() {
+ return BSONObj();
+ }
+ virtual BSONObj after() {
+ return BSONObj();
+ }
+
+ void dotest() {
+ long long start = numeric_limits<int>::max() - 5;
+ long long max = numeric_limits<int>::max() + 5ll;
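+ // Walk x across the 32-bit boundary; $inc should promote the field from
+ // NumberInt to NumberLong instead of overflowing (see SERVER-2005).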
+
+ _client.insert(ns(), BSON("x" << (int)start));
+ ASSERT(findOne()["x"].type() == NumberInt);
+
+ while (start < max) {
+ update(BSON("$inc" << BSON("x" << 1)));
+ start += 1;
+ ASSERT_EQUALS(start, findOne()["x"].numberLong()); // SERVER-2005
+ }
+
+ ASSERT(findOne()["x"].type() == NumberLong);
+ }
+};
+
+class bit1 : public Base {
+ const char* ns() {
+ return "unittests.bit1";
+ }
+ void dotest() {
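+ // $bit applies its sub-operations in the order given: {and:2, or:8} yields
+ // (3 & 2) | 8, while {or:2, and:8} yields (3 | 2) & 8.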
+ test(BSON("_id" << 1 << "x" << 3),
+ BSON("$bit" << BSON("x" << BSON("and" << 2))),
+ BSON("_id" << 1 << "x" << (3 & 2)));
+ test(BSON("_id" << 1 << "x" << 1),
+ BSON("$bit" << BSON("x" << BSON("or" << 4))),
+ BSON("_id" << 1 << "x" << (1 | 4)));
+ test(BSON("_id" << 1 << "x" << 3),
+ BSON("$bit" << BSON("x" << BSON("and" << 2 << "or" << 8))),
+ BSON("_id" << 1 << "x" << ((3 & 2) | 8)));
+ test(BSON("_id" << 1 << "x" << 3),
+ BSON("$bit" << BSON("x" << BSON("or" << 2 << "and" << 8))),
+ BSON("_id" << 1 << "x" << ((3 | 2) & 8)));
+ }
+};
+
+class unset : public Base {
+ const char* ns() {
+ return "unittests.unset";
+ }
+ void dotest() {
+ test("{_id:1,x:1}", "{$unset:{x:1}}", "{_id:1}");
+ }
+};
+
+class setswitchint : public Base {
+ const char* ns() {
+ return "unittests.int1";
+ }
+ void dotest() {
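+ // $set should be able to switch a field's numeric type in place (int <-> double).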
+ test(BSON("_id" << 1 << "x" << 1),
+ BSON("$set" << BSON("x" << 5.6)),
+ BSON("_id" << 1 << "x" << 5.6));
+ test(BSON("_id" << 1 << "x" << 5.6),
+ BSON("$set" << BSON("x" << 1)),
+ BSON("_id" << 1 << "x" << 1));
+ }
+};
+} // namespace basic
+
+
+class All : public Suite {
+public:
+ All() : Suite("update") {}
+ void setupTests() {
+ add<ModId>();
+ add<ModNonmodMix>();
+ add<InvalidMod>();
+ add<ModNotFirst>();
+ add<ModDuplicateFieldSpec>();
+ add<IncNonNumber>();
+ add<PushAllNonArray>();
+ add<PullAllNonArray>();
+ add<IncTargetNonNumber>();
+ add<SetNum>();
+ add<SetString>();
+ add<SetStringDifferentLength>();
+ add<SetStringToNum>();
+ add<SetStringToNumInPlace>();
+ add<SetOnInsertFromEmpty>();
+ add<SetOnInsertFromNonExistent>();
+ add<SetOnInsertFromNonExistentWithQuery>();
+ add<SetOnInsertFromNonExistentWithQueryOverField>();
+ add<SetOnInsertMissingField>();
+ add<SetOnInsertExisting>();
+ add<SetOnInsertMixed>();
+ add<SetOnInsertMissingParent>();
+ add<ModDotted>();
+ add<SetInPlaceDotted>();
+ add<SetRecreateDotted>();
+ add<SetMissingDotted>();
+ add<SetAdjacentDotted>();
+ add<IncMissing>();
+ add<MultiInc>();
+ add<UnorderedNewSet>();
+ add<UnorderedNewSetAdjacent>();
+ add<ArrayEmbeddedSet>();
+ add<AttemptEmbedInExistingNum>();
+ add<AttemptEmbedConflictsWithOtherSet>();
+ add<ModMasksEmbeddedConflict>();
+ add<ModOverwritesExistingObject>();
+ add<InvalidEmbeddedSet>();
+ add<UpsertMissingEmbedded>();
+ add<Push>();
+ add<PushInvalidEltType>();
+ add<PushConflictsWithOtherMod>();
+ add<PushFromNothing>();
+ add<PushFromEmpty>();
+ add<PushInsideNothing>();
+ add<CantPushInsideOtherMod>();
+ add<CantPushTwice>();
+ add<SetEncapsulationConflictsWithExistingType>();
+ add<CantPushToParent>();
+ add<PushEachSimple>();
+ add<PushEachFromEmpty>();
+ add<PushSliceBelowFull>();
+ add<PushSliceReachedFullExact>();
+ add<PushSliceReachedFullWithEach>();
+ add<PushSliceReachedFullWithBoth>();
+ add<PushSliceToZero>();
+ add<PushSliceToZeroFromNothing>();
+ add<PushSliceFromNothing>();
+ add<PushSliceLongerThanSliceFromNothing>();
+ add<PushSliceFromEmpty>();
+ add<PushSliceLongerThanSliceFromEmpty>();
+ add<PushSliceTwoFields>();
+ add<PushSliceAndNormal>();
+ add<PushSliceTwoFieldsConflict>();
+ add<PushSliceAndNormalConflict>();
+ add<PushSliceInvalidEachType>();
+ add<PushSliceInvalidSliceType>();
+ add<PushSliceInvalidSliceValue>();
+ add<PushSliceInvalidSliceDouble>();
+ add<PushSliceValidSliceDouble>();
+ add<PushSliceInvalidSlice>();
+ add<PushSortBelowFull>();
+ add<PushSortReachedFullExact>();
+ add<PushSortReachedFullWithBoth>();
+ add<PushSortToZero>();
+ add<PushSortToZeroFromNothing>();
+ add<PushSortFromNothing>();
+ add<PushSortLongerThanSliceFromNothing>();
+ add<PushSortFromEmpty>();
+ add<PushSortLongerThanSliceFromEmpty>();
+ add<PushSortSortMixed>();
+ add<PushSortSortOutOfOrderFields>();
+ add<PushSortSortExtraFields>();
+ add<PushSortSortMissingFields>();
+ add<PushSortSortNestedFields>();
+ add<PushSortInvalidSortPattern>();
+ add<PushSortInvalidEachType>();
+ add<PushSortInvalidSortType>();
+ add<PushSortInvalidSortValue>();
+ add<PushSortInvalidSortDouble>();
+ add<PushSortValidSortDouble>();
+ add<PushSortInvalidSortSort>();
+ add<PushSortInvalidSortSortOrder>();
+ add<PushSortInvertedSortAndSlice>();
+ add<PushSortInvalidDuplicatedSort>();
+ add<CantIncParent>();
+ add<DontDropEmpty>();
+ add<InsertInEmpty>();
+ add<IndexParentOfMod>();
+ add<PreserveIdWithIndex>();
+ add<CheckNoMods>();
+ add<UpdateMissingToNull>();
+ add<TwoModsWithinDuplicatedField>();
+ add<ThreeModsWithinDuplicatedField>();
+ add<TwoModsBeforeExistingField>();
+ add<basic::inc1>();
+ add<basic::inc2>();
+ add<basic::inc3>();
+ add<basic::inc4>();
+ add<basic::inc5>();
+ add<basic::inc6>();
+ add<basic::bit1>();
+ add<basic::unset>();
+ add<basic::setswitchint>();
+ }
+};
+
+SuiteInstance<All> myall;
+
+} // namespace UpdateTests