/**
 *    Copyright (C) 2018-present MongoDB, Inc.
 *
 *    This program is free software: you can redistribute it and/or modify
 *    it under the terms of the Server Side Public License, version 1,
 *    as published by MongoDB, Inc.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    Server Side Public License for more details.
 *
 *    You should have received a copy of the Server Side Public License
 *    along with this program. If not, see
 *    <http://www.mongodb.com/licensing/server-side-public-license>.
 *
 *    As a special exception, the copyright holders give permission to link the
 *    code of portions of this program with the OpenSSL library under certain
 *    conditions as described in each individual source file and distribute
 *    linked combinations including the program with the OpenSSL library. You
 *    must comply with the Server Side Public License in all respects for
 *    all of the code used other than as permitted herein. If you modify file(s)
 *    with this exception, you may extend this exception to your version of the
 *    file(s), but you are not obligated to do so. If you do not wish to do so,
 *    delete this exception statement from your version. If you delete this
 *    exception statement from all source files in the program, then also delete
 *    it in the license file.
 */

#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault

#include "mongo/platform/basic.h"

#include <string>

#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/index_legacy.h"
#include "mongo/db/index_names.h"
#include "mongo/db/json.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace_details.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h"
#include "mongo/db/storage/mmap_v1/extent.h"
#include "mongo/db/storage/mmap_v1/extent_manager.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_capped.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_simple.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/util/log.h"

namespace NamespaceTests {

using std::string;

const int MinExtentSize = 4096;

namespace MissingFieldTests {

/** A missing field is represented as null in a btree index. */
class BtreeIndexMissingField {
public:
    void run() {
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        BSONObj spec(BSON("key" << BSON("a" << 1)));
        ASSERT_EQUALS(jstNULL,
                      IndexLegacy::getMissingField(&opCtx, NULL, spec).firstElement().type());
    }
};

/** A missing field is represented as null in a 2d index. */
class TwoDIndexMissingField {
public:
    void run() {
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        BSONObj spec(BSON("key" << BSON("a"
                                        << "2d")));
        ASSERT_EQUALS(jstNULL,
                      IndexLegacy::getMissingField(&opCtx, NULL, spec).firstElement().type());
    }
};
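// NB: hashed indexes differ from the btree and 2d cases above: every key they store is a
// 64-bit hash (a NumberLong), so the key for a missing field is hash(null) rather than a
// literal null. The two tests below verify this, including the dependence on the hash seed.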
/** A missing field is represented with the hash of null in a hashed index. */
class HashedIndexMissingField {
public:
    void run() {
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        BSONObj spec(BSON("key" << BSON("a"
                                        << "hashed")));
        BSONObj nullObj = BSON("a" << BSONNULL);

        // Call getKeys on the nullObj.
        BSONObjSet nullFieldKeySet = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
        const CollatorInterface* collator = nullptr;
        ExpressionKeysPrivate::getHashKeys(
            nullObj, "a", 0, 0, false, collator, &nullFieldKeySet, false);
        BSONElement nullFieldFromKey = nullFieldKeySet.begin()->firstElement();

        ASSERT_EQUALS(ExpressionKeysPrivate::makeSingleHashKey(nullObj.firstElement(), 0, 0),
                      nullFieldFromKey.Long());

        BSONObj missingField = IndexLegacy::getMissingField(&opCtx, NULL, spec);
        ASSERT_EQUALS(NumberLong, missingField.firstElement().type());
        ASSERT_BSONELT_EQ(nullFieldFromKey, missingField.firstElement());
    }
};

/**
 * A missing field is represented with the hash of null in a hashed index. This hash value
 * depends on the hash seed.
 */
class HashedIndexMissingFieldAlternateSeed {
public:
    void run() {
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        BSONObj spec(BSON("key" << BSON("a"
                                        << "hashed")
                                << "seed"
                                << 0x5eed));
        BSONObj nullObj = BSON("a" << BSONNULL);

        BSONObjSet nullFieldKeySet = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
        const CollatorInterface* collator = nullptr;
        ExpressionKeysPrivate::getHashKeys(
            nullObj, "a", 0x5eed, 0, false, collator, &nullFieldKeySet, false);
        BSONElement nullFieldFromKey = nullFieldKeySet.begin()->firstElement();

        ASSERT_EQUALS(ExpressionKeysPrivate::makeSingleHashKey(nullObj.firstElement(), 0x5eed, 0),
                      nullFieldFromKey.Long());
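        // (makeSingleHashKey() computes the same seeded hash that getHashKeys() applied above;
        // the assertion just pinned down the exact key value for null under seed 0x5eed, which
        // differs from the key produced under the default seed of 0.)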
        // Ensure that getMissingField recognizes that the seed is different (and returns
        // the right key).
        BSONObj missingField = IndexLegacy::getMissingField(&opCtx, NULL, spec);
        ASSERT_EQUALS(NumberLong, missingField.firstElement().type());
        ASSERT_BSONELT_EQ(nullFieldFromKey, missingField.firstElement());
    }
};

}  // namespace MissingFieldTests

namespace NamespaceDetailsTests {
#if 0  // SERVER-13640

    class Base {
        const char *ns_;
        Lock::GlobalWrite lk;
        OldClientContext _context;
    public:
        Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
        virtual ~Base() {
            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
            OperationContext& opCtx = *opCtxPtr;
            if ( !nsd() )
                return;
            _context.db()->dropCollection( &opCtx, ns() );
        }
    protected:
        void create() {
            Lock::GlobalWrite lk;
            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
            OperationContext& opCtx = *opCtxPtr;
            CollectionOptions collectionOptions;
            ASSERT_OK(collectionOptions.parse(fromjson(spec()),
                                              CollectionOptions::parseForCommand));
            ASSERT_OK(userCreateNS(&opCtx, db(), ns(), collectionOptions, false));
        }
        virtual string spec() const = 0;
        int nRecords() const {
            int count = 0;
            const Extent* ext;
            for ( RecordId extLoc = nsd()->firstExtent();
                    !extLoc.isNull();
                    extLoc = ext->xnext ) {
                ext = extentManager()->getExtent(extLoc);
                int fileNo = ext->firstRecord.a();
                if ( fileNo == -1 )
                    continue;
                for ( int recOfs = ext->firstRecord.getOfs(); recOfs != RecordId::NullOfs;
                      recOfs = recordStore()->recordFor(RecordId(fileNo, recOfs))->nextOfs() ) {
                    ++count;
                }
            }
            ASSERT_EQUALS( count, nsd()->numRecords() );
            return count;
        }
        int nExtents() const {
            int count = 0;
            for ( RecordId extLoc = nsd()->firstExtent();
                    !extLoc.isNull();
                    extLoc = extentManager()->getExtent(extLoc)->xnext ) {
                ++count;
            }
            return count;
        }
        const char *ns() const {
            return ns_;
        }
        const NamespaceDetails *nsd() const {
            Collection* c = collection();
            if ( !c )
                return NULL;
            return c->detailsDeprecated();
        }
        const RecordStore* recordStore() const {
            Collection* c = collection();
            if ( !c )
                return NULL;
            return c->getRecordStore();
        }
        Database* db() const {
            return _context.db();
        }
        const ExtentManager* extentManager() const {
            return db()->getExtentManager();
        }
        Collection* collection() const {
            return db()->getCollection( &opCtx, ns() );
        }

        static BSONObj bigObj() {
            BSONObjBuilder b;
            b.appendOID("_id", 0, true);
            string as( 187, 'a' );
            b.append( "a", as );
            return b.obj();
        }

    };

    class Create : public Base {
    public:
        void run() {
            create();
            ASSERT( nsd() );
            ASSERT_EQUALS( 0, nRecords() );
            ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
            RecordId initial = RecordId();
            initial.setInvalid();
            ASSERT( initial == nsd()->capFirstNewRecord() );
        }
        virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
    };

    class SingleAlloc : public Base {
    public:
        void run() {
            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
            OperationContext& opCtx = *opCtxPtr;
            create();
            BSONObj b = bigObj();
            ASSERT( collection()->insertDocument( &opCtx, b, true ).isOK() );
            ASSERT_EQUALS( 1, nRecords() );
        }
        virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
    };

    class Realloc : public Base {
    public:
        void run() {
            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
            OperationContext& opCtx = *opCtxPtr;
            create();

            const int N = 20;
            const int Q = 16;  // these constants depend on the size of the bson object and the
                               // extent size allocated by the system
            RecordId l[ N ];
            for ( int i = 0; i < N; ++i ) {
                BSONObj b = bigObj();
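                // The single 512-byte extent holds at most Q records, so once it is full each
                // insert reuses the location freed by capped deletion; the check at the bottom
                // of the loop verifies that locations repeat with period Q.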
                StatusWith<RecordId> status = collection()->insertDocument( &opCtx, b, true );
                ASSERT( status.isOK() );
                l[ i ] = status.getValue();
                ASSERT( !l[ i ].isNull() );
                ASSERT( nRecords() <= Q );
                //ASSERT_EQUALS( 1 + i % 2, nRecords() );
                if ( i >= 16 )
                    ASSERT( l[ i ] == l[ i - Q] );
            }
        }
        virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
    };

    class TwoExtent : public Base {
    public:
        void run() {
            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
            OperationContext& opCtx = *opCtxPtr;
            create();
            ASSERT_EQUALS( 2, nExtents() );

            RecordId l[ 8 ];
            for ( int i = 0; i < 8; ++i ) {
                StatusWith<RecordId> status =
                    collection()->insertDocument( &opCtx, bigObj(), true );
                ASSERT( status.isOK() );
                l[ i ] = status.getValue();
                ASSERT( !l[ i ].isNull() );
                //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
                //if ( i > 3 )
                //    ASSERT( l[ i ] == l[ i - 4 ] );
            }
            ASSERT( nRecords() == 8 );

            // Too big
            BSONObjBuilder bob;
            bob.appendOID( "_id", NULL, true );
            bob.append( "a", string( MinExtentSize + 500, 'a' ) );  // min extent size is now 4096
            BSONObj bigger = bob.done();
            ASSERT( !collection()->insertDocument( &opCtx, bigger, false ).isOK() );
            ASSERT_EQUALS( 0, nRecords() );
        }
    private:
        virtual string spec() const {
            return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
        }
    };

    /**
     * Builds a document whose complete on-disk record (header + BSON) is exactly 'size' bytes.
     * The 22 bytes subtracted below are the fixed BSON overhead: the object length, the "_id"
     * and "x" field headers, the string length prefix and terminators, and the trailing EOO
     * byte; the final ASSERT_EQUALS verifies the arithmetic.
     */
    BSONObj docForRecordSize( int size ) {
        BSONObjBuilder b;
        b.append( "_id", 5 );
        b.append( "x", string( size - Record::HeaderSize - 22, 'x' ) );
        BSONObj x = b.obj();
        ASSERT_EQUALS( Record::HeaderSize + x.objsize(), size );
        return x;
    }

    /**
     * alloc() does not quantize records in capped collections.
     * NB: this actually tests that the code in Database::createCollection doesn't set
     * PowerOf2Sizes for capped collections.
     */
    class AllocCappedNotQuantized : public Base {
    public:
        void run() {
            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
            OperationContext& opCtx = *opCtxPtr;
            create();
            ASSERT( nsd()->isCapped() );
            ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );

            StatusWith<RecordId> result =
                collection()->insertDocument( &opCtx, docForRecordSize( 300 ), false );
            ASSERT( result.isOK() );
            Record* record = collection()->getRecordStore()->recordFor( result.getValue() );
            // Check that no quantization is performed.
            ASSERT_EQUALS( 300, record->lengthWithHeaders() );
        }

        virtual string spec() const { return "{capped:true,size:2048}"; }
    };

    /* test NamespaceDetails::cappedTruncateAfter(const char *ns, RecordId loc)
    */
    class TruncateCapped : public Base {
        virtual string spec() const {
            return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
        }
        void pass(int p) {
            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
            OperationContext& opCtx = *opCtxPtr;
            create();
            ASSERT_EQUALS( 2, nExtents() );

            BSONObj b = bigObj();

            int N = MinExtentSize / b.objsize() * nExtents() + 5;
            int T = N - 4;

            RecordId truncAt;
            //RecordId l[ 8 ];
            for ( int i = 0; i < N; ++i ) {
                BSONObj bb = bigObj();
                StatusWith<RecordId> status = collection()->insertDocument( &opCtx, bb, true );
                ASSERT( status.isOK() );
                RecordId a = status.getValue();
                if( T == i )
                    truncAt = a;
                ASSERT( !a.isNull() );
                /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
                if ( i > 3 )
                    ASSERT( l[ i ] == l[ i - 4 ] );*/
            }
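            // N was chosen above to overflow both extents, so by this point the capped
            // collection must have reclaimed space from at least one earlier record.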
            ASSERT( nRecords() < N );

            RecordId last, first;
            {
                unique_ptr<PlanExecutor> runner(InternalPlanner::collectionScan(
                    &opCtx, ns(), collection(), InternalPlanner::BACKWARD));
                runner->getNext(NULL, &last);
                ASSERT( !last.isNull() );
            }
            {
                unique_ptr<PlanExecutor> runner(InternalPlanner::collectionScan(
                    &opCtx, ns(), collection(), InternalPlanner::FORWARD));
                runner->getNext(NULL, &first);
                ASSERT( !first.isNull() );
                ASSERT( first != last ) ;
            }

            collection()->cappedTruncateAfter(&opCtx, truncAt, false);
            ASSERT_EQUALS( collection()->numRecords() , 28u );

            {
                RecordId loc;
                unique_ptr<PlanExecutor> runner(InternalPlanner::collectionScan(
                    &opCtx, ns(), collection(), InternalPlanner::FORWARD));
                runner->getNext(NULL, &loc);
                ASSERT( first == loc);
            }
            {
                unique_ptr<PlanExecutor> runner(InternalPlanner::collectionScan(
                    &opCtx, ns(), collection(), InternalPlanner::BACKWARD));
                RecordId loc;
                runner->getNext(NULL, &loc);
                ASSERT( last != loc );
                ASSERT( !last.isNull() );
            }

            // Too big
            BSONObjBuilder bob;
            bob.appendOID("_id", 0, true);
            bob.append( "a", string( MinExtentSize + 300, 'a' ) );
            BSONObj bigger = bob.done();
            ASSERT( !collection()->insertDocument( &opCtx, bigger, true ).isOK() );
            ASSERT_EQUALS( 0, nRecords() );
        }
    public:
        void run() {
            // log() << "******** NOT RUNNING TruncateCapped test yet ************" << endl;
            pass(0);
        }
    };
#endif  // SERVER-13640

#if 0  // XXXXXX - once RecordStore is clean, we can put this back
    class Migrate : public Base {
    public:
        void run() {
            create();
            nsd()->deletedListEntry( 2 ) = nsd()->cappedListOfAllDeletedRecords().drec()->
                    nextDeleted().drec()->nextDeleted();
            nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->
                    nextDeleted().writing() = RecordId();
            nsd()->cappedLastDelRecLastExtent().Null();
            NamespaceDetails *d = nsd();

            zero( &d->capExtent() );
            zero( &d->capFirstNewRecord() );

            // this has a side effect of calling NamespaceDetails::cappedCheckMigrate
            db()->namespaceIndex().details( ns() );

            ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
            ASSERT( nsd()->capExtent().getOfs() != 0 );
            ASSERT( !nsd()->capFirstNewRecord().isValid() );
            int nDeleted = 0;
            for ( RecordId i = nsd()->cappedListOfAllDeletedRecords();
                    !i.isNull(); i = i.drec()->nextDeleted(), ++nDeleted );
            ASSERT_EQUALS( 10, nDeleted );
            ASSERT( nsd()->cappedLastDelRecLastExtent().isNull() );
        }
    private:
        static void zero( RecordId *d ) {
            memset( d, 0, sizeof( RecordId ) );
        }
        virtual string spec() const {
            return "{\"capped\":true,\"size\":512,\"$nExtents\":10}";
        }
    };
#endif

    // This isn't a particularly useful test, and because it doesn't clean up
    // after itself, /tmp/unittest needs to be cleared after running.
    //    class BigCollection : public Base {
    //    public:
    //        BigCollection() : Base( "NamespaceDetailsTests_BigCollection" ) {}
    //        void run() {
    //            create();
    //            ASSERT_EQUALS( 2, nExtents() );
    //        }
    //    private:
    //        virtual string spec() const {
    //            // NOTE 256 added to size in _userCreateNS()
    //            long long big = DataFile::maxSize() - DataFileHeader::HeaderSize;
    //            stringstream ss;
    //            ss << "{\"capped\":true,\"size\":" << big << "}";
    //            return ss.str();
    //        }
    //    };

#if 0  // SERVER-13640
    class SwapIndexEntriesTest : public Base {
    public:
        void run() {
            create();
            NamespaceDetails *nsd = collection()->detailsWritable();

            const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
            OperationContext& opCtx = *opCtxPtr;
            // Set 2 & 54 as multikey
            nsd->setIndexIsMultikey(&opCtx, 2, true);
            nsd->setIndexIsMultikey(&opCtx, 54, true);
            ASSERT(nsd->isMultikey(2));
            ASSERT(nsd->isMultikey(54));

            // Flip 2 & 47
            nsd->setIndexIsMultikey(&opCtx, 2, false);
            nsd->setIndexIsMultikey(&opCtx, 47, true);
            ASSERT(!nsd->isMultikey(2));
            ASSERT(nsd->isMultikey(47));

            // Reset entries that are already true
            nsd->setIndexIsMultikey(&opCtx, 54, true);
            nsd->setIndexIsMultikey(&opCtx, 47, true);
            ASSERT(nsd->isMultikey(54));
            ASSERT(nsd->isMultikey(47));

            // Two non-multi-key
            nsd->setIndexIsMultikey(&opCtx, 2, false);
            nsd->setIndexIsMultikey(&opCtx, 43, false);
            ASSERT(!nsd->isMultikey(2));
            ASSERT(nsd->isMultikey(54));
            ASSERT(nsd->isMultikey(47));
            ASSERT(!nsd->isMultikey(43));
        }
        virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
    };
#endif  // SERVER-13640

}  // namespace NamespaceDetailsTests

namespace DatabaseTests {

class RollbackCreateCollection {
public:
    void run() {
        const string dbName = "rollback_create_collection";
        const string committedName = dbName + ".committed";
        const string rolledBackName = dbName + ".rolled_back";

        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;

        Lock::DBLock lk(&opCtx, dbName, MODE_X);

        bool justCreated;
        Database* db = DatabaseHolder::getDatabaseHolder().openDb(&opCtx, dbName, &justCreated);
        ASSERT(justCreated);

        Collection* committedColl;
        {
            WriteUnitOfWork wunit(&opCtx);
            ASSERT_FALSE(db->getCollection(&opCtx, committedName));
            committedColl = db->createCollection(&opCtx, committedName);
            ASSERT_EQUALS(db->getCollection(&opCtx, committedName), committedColl);
            wunit.commit();
        }

        ASSERT_EQUALS(db->getCollection(&opCtx, committedName), committedColl);

        {
            WriteUnitOfWork wunit(&opCtx);
            ASSERT_FALSE(db->getCollection(&opCtx, rolledBackName));
            Collection* rolledBackColl = db->createCollection(&opCtx, rolledBackName);
            ASSERT_EQUALS(db->getCollection(&opCtx, rolledBackName), rolledBackColl);
            // not committing so creation should be rolled back
        }

        // The rolledBackCollection creation should have been rolled back
        ASSERT_FALSE(db->getCollection(&opCtx, rolledBackName));
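        // (A WriteUnitOfWork that goes out of scope without commit() rolls back its changes;
        // the braced scope above relies on exactly that.)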
        // The committedCollection should not have been affected by the rollback. Holders
        // of the original Collection pointer should still be valid.
        ASSERT_EQUALS(db->getCollection(&opCtx, committedName), committedColl);
    }
};

class RollbackDropCollection {
public:
    void run() {
        const string dbName = "rollback_drop_collection";
        const string droppedName = dbName + ".dropped";
        const string rolledBackName = dbName + ".rolled_back";

        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;

        Lock::DBLock lk(&opCtx, dbName, MODE_X);

        bool justCreated;
        Database* db = DatabaseHolder::getDatabaseHolder().openDb(&opCtx, dbName, &justCreated);
        ASSERT(justCreated);

        {
            WriteUnitOfWork wunit(&opCtx);
            ASSERT_FALSE(db->getCollection(&opCtx, droppedName));
            Collection* droppedColl;
            droppedColl = db->createCollection(&opCtx, droppedName);
            ASSERT_EQUALS(db->getCollection(&opCtx, droppedName), droppedColl);
            db->dropCollection(&opCtx, droppedName).transitional_ignore();
            wunit.commit();
        }

        // Should have been really dropped
        ASSERT_FALSE(db->getCollection(&opCtx, droppedName));

        {
            WriteUnitOfWork wunit(&opCtx);
            ASSERT_FALSE(db->getCollection(&opCtx, rolledBackName));
            Collection* rolledBackColl = db->createCollection(&opCtx, rolledBackName);
            wunit.commit();
            ASSERT_EQUALS(db->getCollection(&opCtx, rolledBackName), rolledBackColl);
            db->dropCollection(&opCtx, rolledBackName).transitional_ignore();
            // not committing so dropping should be rolled back
        }

        // The rolledBackCollection dropping should have been rolled back.
        // Original Collection pointers are no longer valid.
        ASSERT(db->getCollection(&opCtx, rolledBackName));

        // The droppedCollection should not have been restored by the rollback.
        ASSERT_FALSE(db->getCollection(&opCtx, droppedName));
    }
};

}  // namespace DatabaseTests

class All : public Suite {
public:
    All() : Suite("namespace") {}

    void setupTests() {
        add<MissingFieldTests::BtreeIndexMissingField>();
        add<MissingFieldTests::TwoDIndexMissingField>();
        add<MissingFieldTests::HashedIndexMissingField>();
        add<MissingFieldTests::HashedIndexMissingFieldAlternateSeed>();

        // add< NamespaceDetailsTests::Create >();
        // add< NamespaceDetailsTests::SingleAlloc >();
        // add< NamespaceDetailsTests::Realloc >();
        // add< NamespaceDetailsTests::AllocCappedNotQuantized >();
        // add< NamespaceDetailsTests::TwoExtent >();
        // add< NamespaceDetailsTests::TruncateCapped >();
        // add< NamespaceDetailsTests::Migrate >();
        // add< NamespaceDetailsTests::SwapIndexEntriesTest >();
        // add< NamespaceDetailsTests::BigCollection >();

#if 0  // until ROLLBACK_ENABLED
        add< DatabaseTests::RollbackCreateCollection >();
        add< DatabaseTests::RollbackDropCollection >();
#endif
    }
};

SuiteInstance<All> myall;

}  // namespace NamespaceTests