author    Mathias Stearn <mathias@10gen.com>  2010-12-09 14:44:08 -0500
committer Mathias Stearn <mathias@10gen.com>  2010-12-10 19:20:43 -0500
commit    1c17cece76f77fa00e643ce5e4fa85ccebc9f990 (patch)
tree      ac651dae252641816dbdb9053aa2f76ed8656c89
parent    3469206c4bebd4ee66529bb34deaf3ad9cdcfbb3 (diff)
Pull some durability code into a class for easy disabling
 db/btree.cpp               | 16
 db/cap.cpp                 | 16
 db/db.cpp                  |  5
 db/dbcommands.cpp          |  2
 db/dur.cpp                 | 78
 db/dur.h                   | 82
 db/jsobjmanipulator.h      | 10
 db/mongommf.cpp            |  2
 db/mongomutex.h            |  2
 db/namespace.cpp           | 20
 db/namespace.h             |  8
 db/oplog.cpp               |  2
 db/pdfile.cpp              | 70
 db/pdfile.h                |  4
 dbtests/btreetests.cpp     |  8
 dbtests/framework.cpp      |  3
 dbtests/namespacetests.cpp |  2
 dbtests/pdfiletests.cpp    | 20
 util/hashtab.h             |  4
 19 files changed, 191 insertions(+), 163 deletions(-)
diff --git a/db/btree.cpp b/db/btree.cpp
index 7ebf1e5fae6..b4f36f8f26a 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -43,11 +43,11 @@ namespace mongo {
BtreeBucket* DiskLoc::btreemod() const {
assert( _a != -1 );
BtreeBucket *b = const_cast< BtreeBucket * >( btree() );
- return static_cast< BtreeBucket* >( dur::writingPtr( b, BucketSize ) );
+ return static_cast< BtreeBucket* >( getDur().writingPtr( b, BucketSize ) );
}
_KeyNode& _KeyNode::writing() const {
- return *dur::writing( const_cast< _KeyNode* >( this ) );
+ return *getDur().writing( const_cast< _KeyNode* >( this ) );
}
KeyNode::KeyNode(const BucketBasics& bb, const _KeyNode &k) :
@@ -354,7 +354,7 @@ namespace mongo {
// declare that we will write to [k(keypos),k(n)]
// todo: this writes a medium amount to the journal. we may want to add a verb "shift" to the redo log so
// we can log a very small amount.
- b = (BucketBasics*) dur::writingAtOffset((void *) this, p-(char*)this, q-p);
+ b = (BucketBasics*) getDur().writingAtOffset((void *) this, p-(char*)this, q-p);
// e.g. n==3, keypos==2
// 1 4 9
@@ -364,7 +364,7 @@ namespace mongo {
b->k(j) = b->k(j-1);
}
- dur::declareWriteIntent(&b->emptySize, 12);
+ getDur().declareWriteIntent(&b->emptySize, 12);
b->emptySize -= sizeof(_KeyNode);
b->n++;
@@ -373,7 +373,7 @@ namespace mongo {
kn.recordLoc = recordLoc;
kn.setKeyDataOfs((short) b->_alloc(key.objsize()) );
char *p = b->dataAt(kn.keyDataOfs());
- dur::declareWriteIntent(p, key.objsize());
+ getDur().declareWriteIntent(p, key.objsize());
memcpy(p, key.objdata(), key.objsize());
return true;
}
@@ -1164,7 +1164,7 @@ namespace mongo {
{
const _KeyNode *_kn = &k(keypos);
- _KeyNode *kn = (_KeyNode *) dur::alreadyDeclared((_KeyNode*) _kn); // already declared intent in basicInsert()
+ _KeyNode *kn = (_KeyNode *) getDur().alreadyDeclared((_KeyNode*) _kn); // already declared intent in basicInsert()
if ( keypos+1 == n ) { // last key
if ( nextChild != lchild ) {
out() << "ERROR nextChild != lchild" << endl;
@@ -1195,7 +1195,7 @@ namespace mongo {
assert(false);
}
const DiskLoc *pc = &k(keypos+1).prevChildBucket;
- *dur::alreadyDeclared((DiskLoc*) pc) = rchild; // declared in basicInsert()
+ *getDur().alreadyDeclared((DiskLoc*) pc) = rchild; // declared in basicInsert()
if ( !rchild.isNull() )
rchild.btree()->parent.writing() = thisLoc;
}
@@ -1726,7 +1726,7 @@ namespace mongo {
while( 1 ) {
if( loc.btree()->tempNext().isNull() ) {
// only 1 bucket at this level. we are done.
- dur::writingDiskLoc(idx.head) = loc;
+ getDur().writingDiskLoc(idx.head) = loc;
break;
}
levels++;
diff --git a/db/cap.cpp b/db/cap.cpp
index c36e57c1b58..507a8d81c72 100644
--- a/db/cap.cpp
+++ b/db/cap.cpp
@@ -64,7 +64,7 @@ namespace mongo {
for (; !i.isNull() && inCapExtent( i ); i = i.drec()->nextDeleted )
drecs.push_back( i );
- dur::writingDiskLoc( cappedFirstDeletedInCurExtent() ) = i;
+ getDur().writingDiskLoc( cappedFirstDeletedInCurExtent() ) = i;
// This is the O(n^2) part.
drecs.sort();
@@ -82,7 +82,7 @@ namespace mongo {
DiskLoc b = *j;
while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders == b.getOfs() ) {
// a & b are adjacent. merge.
- dur::writingInt( a.drec()->lengthWithHeaders ) += b.drec()->lengthWithHeaders;
+ getDur().writingInt( a.drec()->lengthWithHeaders ) += b.drec()->lengthWithHeaders;
j++;
if ( j == drecs.end() ) {
DEBUGGING out() << "temp: compact adddelrec2\n";
@@ -146,20 +146,20 @@ namespace mongo {
// We want cappedLastDelRecLastExtent() to be the last DeletedRecord of the prev cap extent
// (or DiskLoc() if new capExtent == firstExtent)
if ( capExtent == lastExtent )
- dur::writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
+ getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
else {
DiskLoc i = cappedFirstDeletedInCurExtent();
for (; !i.isNull() && nextIsInCapExtent( i ); i = i.drec()->nextDeleted );
- dur::writingDiskLoc( cappedLastDelRecLastExtent() ) = i;
+ getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = i;
}
- dur::writingDiskLoc( capExtent ) = theCapExtent()->xnext.isNull() ? firstExtent : theCapExtent()->xnext;
+ getDur().writingDiskLoc( capExtent ) = theCapExtent()->xnext.isNull() ? firstExtent : theCapExtent()->xnext;
/* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
//dassert( theCapExtent()->ns == ns );
theCapExtent()->assertOk();
- dur::writingDiskLoc( capFirstNewRecord ) = DiskLoc();
+ getDur().writingDiskLoc( capFirstNewRecord ) = DiskLoc();
}
DiskLoc NamespaceDetails::__capAlloc( int len ) {
@@ -191,7 +191,7 @@ namespace mongo {
DiskLoc NamespaceDetails::cappedAlloc(const char *ns, int len) {
// signal done allocating new extents.
if ( !cappedLastDelRecLastExtent().isValid() )
- dur::writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
+ getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
assert( len < 400000000 );
int passes = 0;
@@ -257,7 +257,7 @@ namespace mongo {
// Remember first record allocated on this iteration through capExtent.
if ( capFirstNewRecord.isValid() && capFirstNewRecord.isNull() )
- dur::writingDiskLoc(capFirstNewRecord) = loc;
+ getDur().writingDiskLoc(capFirstNewRecord) = loc;
return loc;
}
diff --git a/db/db.cpp b/db/db.cpp
index f1b16ddffe1..b4ac1141fe0 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -563,7 +563,10 @@ sendmore:
globalScriptEngine->setGetInterruptSpecCallback( jsGetInterruptSpecCallback );
}
- dur::startup();
+ if (cmdLine.dur)
+ enableDurability();
+
+ getDur().startup();
if( cmdLine.durTrace & CmdLine::DurRecoverOnly )
return;
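Note the ordering in this hunk: enableDurability() has to run before getDur().startup(), since whichever implementation _impl points at is the one whose virtuals execute. Continuing the sketch above, the caller-visible difference is (illustrative only, per the contracts documented in db/dur.h below):

    int x = 0;
    int* p = static_cast<int*>(getDur().writingPtr(&x, sizeof(x)));
    *p = 42;
    // Before enableDurability(): p == &x and nothing is journalled.
    // After it: the same call records a write intent for the journal first
    // (and under testIntent, p may point into the private view instead of &x).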
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 90964c6950c..364c8863bfe 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -539,7 +539,7 @@ namespace mongo {
BackgroundOperation::assertNoBgOpInProgForNs(ns);
- d = dur::writing(d);
+ d = getDur().writing(d);
d->aboutToDeleteAnIndex();
/* there may be pointers pointing at keys in the btree(s). kill them. */
diff --git a/db/dur.cpp b/db/dur.cpp
index 198cf1b087a..3761398911e 100644
--- a/db/dur.cpp
+++ b/db/dur.cpp
@@ -39,17 +39,6 @@
*/
#include "pch.h"
-
-#if !defined(_DURABLE)
-
-namespace mongo {
- namespace dur {
- void debugCheckLastDeclaredWrite() { }
- }
-}
-
-#else
-
#include "cmdline.h"
#include "client.h"
#include "dur.h"
@@ -62,37 +51,47 @@ namespace mongo {
using namespace mongoutils;
namespace mongo {
+ DurableInterface* DurableInterface::_impl = new NonDurableImpl();
- namespace dur {
+#if !defined(_DURABLE)
+ void enableDurability() {}
+#else
+
+
+ namespace {
+ using namespace dur;
const bool DebugValidateMapsMatch = false;
const bool DebugCheckLastDeclaredWrite = false;
- static CommitJob commitJob;
+ CommitJob commitJob;
+ }
+
+ void enableDurability() { // TODO: merge with startup() ?
+ assert(typeid(*DurableInterface::_impl) == typeid(NonDurableImpl));
+ // let the NonDurableImpl instance leak; it's tiny and only happens once
+ DurableInterface::_impl = new DurableImpl();
+ }
/** Declare that a file has been created
Normally writes are applied only after journalling, for safety. But here the file
is created first, and the journal will just replay the creation if the create didn't
happen because of crashing.
*/
- void createdFile(string filename, unsigned long long len) {
- if( cmdLine.dur ) {
- shared_ptr<DurOp> op( new FileCreatedOp(filename, len) );
- commitJob.noteOp(op);
- }
+ void DurableImpl::createdFile(string filename, unsigned long long len) {
+ shared_ptr<DurOp> op( new FileCreatedOp(filename, len) );
+ commitJob.noteOp(op);
}
/** declare write intent. when already in the write view if testIntent is true. */
- void declareWriteIntent(void *p, unsigned len) {
- if( cmdLine.dur ) {
- WriteIntent w(p, len);
- commitJob.note(w);
- }
+ void DurableImpl::declareWriteIntent(void *p, unsigned len) {
+ WriteIntent w(p, len);
+ commitJob.note(w);
}
string hexdump(const char *data, unsigned len);
- void* writingPtr(void *x, unsigned len) {
+ void* DurableImpl::writingPtr(void *x, unsigned len) {
void *p = x;
if( testIntent )
p = MongoMMF::switchToPrivateView(x);
@@ -105,7 +104,7 @@ namespace mongo {
@param len the length at ofs we will write
@return new buffer pointer. this is modified when testIntent is true.
*/
- void* writingAtOffset(void *buf, unsigned ofs, unsigned len) {
+ void* DurableImpl::writingAtOffset(void *buf, unsigned ofs, unsigned len) {
char *p = (char *) buf;
if( testIntent )
p = (char *) MongoMMF::switchToPrivateView(buf);
@@ -121,7 +120,7 @@ namespace mongo {
SLOW
*/
- void debugCheckLastDeclaredWrite() {
+ void DurableImpl::debugCheckLastDeclaredWrite() {
if( !DebugCheckLastDeclaredWrite )
return;
@@ -165,6 +164,7 @@ namespace mongo {
}
}
+ namespace dur {
/** we will build an output buffer ourself and then use O_DIRECT
we could be in read lock for this
caller handles locking
@@ -467,19 +467,21 @@ namespace mongo {
void unlinkThread();
void recover();
- void startup() {
- if( !cmdLine.dur )
- return;
- if( testIntent )
- return;
- recover();
- journalMakeDir();
- boost::thread t(durThread);
- boost::thread t2(unlinkThread);
- }
-
} // namespace dur
-} // namespace mongo
+
+ void DurableImpl::startup() {
+ if( !cmdLine.dur )
+ return;
+ if( testIntent )
+ return;
+ recover();
+ journalMakeDir();
+ boost::thread t(durThread);
+ boost::thread t2(unlinkThread);
+ }
#endif
+
+} // namespace mongo
+
diff --git a/db/dur.h b/db/dur.h
index 32f40085c12..4029de4131e 100644
--- a/db/dur.h
+++ b/db/dur.h
@@ -7,39 +7,22 @@
namespace mongo {
- namespace dur {
-
- /** it's very easy to manipulate Record::data open ended. Thus a call to writing(Record*) is suspect.
- this will override the templated version and yield an unresolved external
- */
- Record* writing(Record* r);
-
-#if !defined(_DURABLE)
- inline void startup() { }
- inline void* writingPtr(void *x, unsigned len) { return x; }
- inline DiskLoc& writingDiskLoc(DiskLoc& d) { return d; }
- inline int& writingInt(int& d) { return d; }
- inline Record* writing(Record* r) { return r; }
- template <typename T> inline T* writing(T *x) { return x; }
- inline void assertReading(void *p) { }
- template <typename T> inline T* writingNoLog(T *x) { return x; }
- inline void* writingAtOffset(void *buf, unsigned ofs, unsigned len) { return buf; }
- template <typename T> inline T* alreadyDeclared(T *x) { return x; }
- inline void declareWriteIntent(void *, unsigned) { }
- inline void createdFile(string filename, unsigned long long len) { }
-#else
+ class DurableInterface {
+ protected:
+ DurableInterface() {} // Should only be creating subclasses
+ public:
/** call during startup so durability module can initialize
throws if fatal error
*/
- void startup();
+ virtual void startup() = 0;
/** Declare that a file has been created
Normally writes are applied only after journalling, for safety. But here the file
is created first, and the journal will just replay the creation if the create didn't
happen because of crashing.
*/
- void createdFile(string filename, unsigned long long len);
+ virtual void createdFile(string filename, unsigned long long len) = 0;
/** Declarations of write intent.
@@ -53,19 +36,27 @@ namespace mongo {
/** declare intent to write to x for up to len
@return pointer where to write. this is modified when testIntent is true.
*/
- void* writingPtr(void *x, unsigned len);
+ virtual void* writingPtr(void *x, unsigned len) = 0;
/** declare write intent; should already be in the write view to work correctly when testIntent is true.
if you aren't, use writingPtr() instead.
*/
- void declareWriteIntent(void *x, unsigned len);
+ virtual void declareWriteIntent(void *x, unsigned len) = 0;
/** declare intent to write
@param ofs offset within buf at which we will write
@param len the length at ofs we will write
@return new buffer pointer. this is modified when testIntent is true.
*/
- void* writingAtOffset(void *buf, unsigned ofs, unsigned len);
+ virtual void* writingAtOffset(void *buf, unsigned ofs, unsigned len) = 0;
+
+ virtual void debugCheckLastDeclaredWrite() = 0;
+
+ virtual ~DurableInterface() { assert(!"don't destroy me"); }
+
+ //////////////////////////////
+ // END OF VIRTUAL FUNCTIONS //
+ //////////////////////////////
inline DiskLoc& writingDiskLoc(DiskLoc& d) {
return *((DiskLoc*) writingPtr(&d, sizeof(d)));
@@ -94,6 +85,11 @@ namespace mongo {
return (T*) writingPtr(x, sizeof(T));
}
+ /** it's very easy to manipulate Record::data open ended. Thus a call to writing(Record*) is suspect.
+ this will override the templated version and yield an unresolved external
+ */
+ Record* writing(Record* r);
+
/** declare our intent to write, but it doesn't have to be journaled, as this write is
something 'unimportant'. depending on our implementation, we may or may not be able
to take advantage of this versus doing the normal work we do.
@@ -110,13 +106,39 @@ namespace mongo {
dassert( !testIntent || MongoMMF::switchToPrivateView(p) != p );
}
-#endif
+ private:
+ static DurableInterface* _impl;
- void debugCheckLastDeclaredWrite();
+ // in mongo:: namespace
+ friend DurableInterface& getDur();
+ friend void enableDurability(); // should only be called once at startup
+
+ }; // class DurableInterface
+
+ inline DurableInterface& getDur() { return *DurableInterface::_impl; }
+ void enableDurability();
- } // namespace dur
+ class NonDurableImpl : public DurableInterface {
+ void startup() { }
+ void* writingPtr(void *x, unsigned len) { return x; }
+ void* writingAtOffset(void *buf, unsigned ofs, unsigned len) { return buf; }
+ void declareWriteIntent(void *, unsigned) { }
+ void createdFile(string filename, unsigned long long len) { }
+ void debugCheckLastDeclaredWrite() {}
+ };
+
+#ifdef _DURABLE
+ class DurableImpl : public DurableInterface {
+ void startup();
+ void* writingPtr(void *x, unsigned len);
+ void* writingAtOffset(void *buf, unsigned ofs, unsigned len);
+ void declareWriteIntent(void *, unsigned);
+ void createdFile(string filename, unsigned long long len);
+ void debugCheckLastDeclaredWrite();
+ };
+#endif
/** declare that we are modifying a diskloc and this is a datafile write. */
- inline DiskLoc& DiskLoc::writing() const { return dur::writingDiskLoc(*const_cast< DiskLoc * >( this )); }
+ inline DiskLoc& DiskLoc::writing() const { return getDur().writingDiskLoc(*const_cast< DiskLoc * >( this )); }
}
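One detail of the new header worth calling out: everything below the "END OF VIRTUAL FUNCTIONS" banner — writingDiskLoc(), writingInt(), the writing<T>() template — is a non-virtual convenience layer written once in the base class in terms of writingPtr(), so neither implementation redefines it. Reduced to essentials (again assuming the one-hook interface sketched earlier):

    class DurableInterface {
    public:
        virtual void* writingPtr(void* x, unsigned len) = 0;   // the one virtual hook
        virtual ~DurableInterface() {}

        // Typed wrappers funnel through writingPtr(); subclasses never touch them.
        int& writingInt(int& d) {
            return *static_cast<int*>(writingPtr(&d, sizeof(d)));
        }
        template <typename T>
        T* writing(T* x) {
            return static_cast<T*>(writingPtr(x, sizeof(T)));
        }
    };

The separately declared Record* writing(Record*) overload is the guard described in the comment above: open-ended writes through Record::data are suspect, so that overload is declared but never defined, turning such calls into link errors instead of silently journalling sizeof(Record) bytes.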
diff --git a/db/jsobjmanipulator.h b/db/jsobjmanipulator.h
index fed96762f28..2b628cff4e5 100644
--- a/db/jsobjmanipulator.h
+++ b/db/jsobjmanipulator.h
@@ -45,9 +45,9 @@ namespace mongo {
}
void SetNumber(double d) {
if ( _element.type() == NumberDouble )
- *dur::writing( reinterpret_cast< double * >( value() ) ) = d;
+ *getDur().writing( reinterpret_cast< double * >( value() ) ) = d;
else if ( _element.type() == NumberInt )
- *dur::writing( reinterpret_cast< int * >( value() ) ) = (int) d;
+ *getDur().writing( reinterpret_cast< int * >( value() ) ) = (int) d;
else assert(0);
}
void setLong(long long n) {
@@ -56,7 +56,7 @@ namespace mongo {
}
void SetLong(long long n) {
assert( _element.type() == NumberLong );
- *dur::writing( reinterpret_cast< long long * >(value()) ) = n;
+ *getDur().writing( reinterpret_cast< long long * >(value()) ) = n;
}
void setInt(int n) {
assert( _element.type() == NumberInt );
@@ -64,7 +64,7 @@ namespace mongo {
}
void SetInt(int n) {
assert( _element.type() == NumberInt );
- dur::writingInt( *reinterpret_cast< int * >( value() ) ) = n;
+ getDur().writingInt( *reinterpret_cast< int * >( value() ) ) = n;
}
@@ -82,7 +82,7 @@ namespace mongo {
int valsize = e.valuesize();
int ofs = (int) (v-d);
dassert( ofs > 0 );
- char *p = (char *) dur::writingPtr(d, valsize + ofs);
+ char *p = (char *) getDur().writingPtr(d, valsize + ofs);
*p = e.type();
memcpy( p + ofs, e.value(), valsize );
}
diff --git a/db/mongommf.cpp b/db/mongommf.cpp
index 06f36db3d8a..e7dfd2ceda5 100644
--- a/db/mongommf.cpp
+++ b/db/mongommf.cpp
@@ -170,7 +170,7 @@ namespace mongo {
bool preExisting = MemoryMappedFile::exists(fname.c_str());
_view_write = map(fname.c_str(), len, sequentialHint ? SEQUENTIAL : 0);
if( cmdLine.dur && !testIntent && _view_write && !preExisting ) {
- dur::createdFile(fname, len);
+ getDur().createdFile(fname, len);
}
return finishOpening();
}
diff --git a/db/mongomutex.h b/db/mongomutex.h
index 2119d803628..72cd4098f85 100644
--- a/db/mongomutex.h
+++ b/db/mongomutex.h
@@ -214,7 +214,7 @@ namespace mongo {
inline void MongoMutex::_releasedWriteLock() {
#if defined(_DURABLE) && defined(_DEBUG)
- dur::debugCheckLastDeclaredWrite();
+ getDur().debugCheckLastDeclaredWrite();
#endif
}
diff --git a/db/namespace.cpp b/db/namespace.cpp
index 873c54ee541..6bb8ab08686 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -187,7 +187,7 @@ namespace mongo {
}
void NamespaceDetails::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
- dur::assertReading(this);
+ getDur().assertReading(this);
BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails) );
#if defined(_DEBUG) && !defined(_DURABLE)
@@ -196,7 +196,7 @@ namespace mongo {
}
#endif
{
- Record *r = (Record *) dur::writingPtr(d, sizeof(Record));
+ Record *r = (Record *) getDur().writingPtr(d, sizeof(Record));
d = &r->asDeleted();
// defensive code: try to make us notice if we reference a deleted record
(unsigned&) (r->data) = 0xeeeeeeee;
@@ -207,7 +207,7 @@ namespace mongo {
// Initial extent allocation. Insert at end.
d->nextDeleted = DiskLoc();
if ( cappedListOfAllDeletedRecords().isNull() )
- dur::writingDiskLoc( cappedListOfAllDeletedRecords() ) = dloc;
+ getDur().writingDiskLoc( cappedListOfAllDeletedRecords() ) = dloc;
else {
DiskLoc i = cappedListOfAllDeletedRecords();
for (; !i.drec()->nextDeleted.isNull(); i = i.drec()->nextDeleted )
@@ -216,14 +216,14 @@ namespace mongo {
}
} else {
d->nextDeleted = cappedFirstDeletedInCurExtent();
- dur::writingDiskLoc( cappedFirstDeletedInCurExtent() ) = dloc;
+ getDur().writingDiskLoc( cappedFirstDeletedInCurExtent() ) = dloc;
// always compact() after this so order doesn't matter
}
} else {
int b = bucket(d->lengthWithHeaders);
DiskLoc& list = deletedList[b];
DiskLoc oldHead = list;
- dur::writingDiskLoc(list) = dloc;
+ getDur().writingDiskLoc(list) = dloc;
d->nextDeleted = oldHead;
}
}
@@ -236,7 +236,7 @@ namespace mongo {
return loc;
DeletedRecord *r = loc.drec();
- r = dur::writing(r);
+ r = getDur().writing(r);
/* note we want to grab from the front so our next pointers on disk tend
to go in a forward direction which is important for performance. */
@@ -261,7 +261,7 @@ namespace mongo {
DiskLoc newDelLoc = loc;
newDelLoc.inc(lenToAlloc);
DeletedRecord *newDel = DataFileMgr::makeDeletedRecord(newDelLoc, left);
- DeletedRecord *newDelW = dur::writing(newDel);
+ DeletedRecord *newDelW = getDur().writing(newDel);
newDelW->extentOfs = r->extentOfs;
newDelW->lengthWithHeaders = left;
newDelW->nextDeleted.Null();
@@ -338,8 +338,8 @@ namespace mongo {
/* unlink ourself from the deleted list */
{
- DeletedRecord *bmr = dur::writing(bestmatch.drec());
- *dur::writing(bestprev) = bmr->nextDeleted;
+ DeletedRecord *bmr = getDur().writing(bestmatch.drec());
+ *getDur().writing(bestprev) = bmr->nextDeleted;
bmr->nextDeleted.setInvalid(); // defensive.
assert(bmr->extentOfs < bestmatch.getOfs());
}
@@ -473,7 +473,7 @@ namespace mongo {
id = &idx(nIndexes,false);
}
- (*dur::writing(&nIndexes))++;
+ (*getDur().writing(&nIndexes))++;
if ( resetTransient )
NamespaceDetailsTransient::get_w(thisns).addedIndex();
return *id;
diff --git a/db/namespace.h b/db/namespace.h
index db4bb38209c..be730d44746 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -266,13 +266,13 @@ namespace mongo {
dassert( i < NIndexesMax );
unsigned long long x = ((unsigned long long) 1) << i;
if( multiKeyIndexBits & x ) return;
- *dur::writing(&multiKeyIndexBits) |= x;
+ *getDur().writing(&multiKeyIndexBits) |= x;
}
void clearIndexIsMultikey(int i) {
dassert( i < NIndexesMax );
unsigned long long x = ((unsigned long long) 1) << i;
if( (multiKeyIndexBits & x) == 0 ) return;
- *dur::writing(&multiKeyIndexBits) &= ~x;
+ *getDur().writing(&multiKeyIndexBits) &= ~x;
}
/* add a new index. does not add to system.indexes etc. - just to NamespaceDetails.
@@ -288,12 +288,12 @@ namespace mongo {
void paddingFits() {
double x = paddingFactor - 0.01;
if ( x >= 1.0 )
- *dur::writingNoLog(&paddingFactor) = x;
+ *getDur().writingNoLog(&paddingFactor) = x;
}
void paddingTooSmall() {
double x = paddingFactor + 0.6;
if ( x <= 2.0 )
- *dur::writingNoLog(&paddingFactor) = x;
+ *getDur().writingNoLog(&paddingFactor) = x;
}
// @return offset in indexes[]
diff --git a/db/oplog.cpp b/db/oplog.cpp
index aebb3819d75..d8c005f3d98 100644
--- a/db/oplog.cpp
+++ b/db/oplog.cpp
@@ -241,7 +241,7 @@ namespace mongo {
{
const int size2 = obj.objsize() + 1 + 2;
- char *p = (char *) dur::writingPtr(r->data, size2+posz);
+ char *p = (char *) getDur().writingPtr(r->data, size2+posz);
memcpy(p, partial.objdata(), posz);
*((unsigned *)p) += size2;
p += posz - 1;
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 4c121bfa6b5..83db825487e 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -261,7 +261,7 @@ namespace mongo {
}
if ( mx > 0 )
- dur::writingInt( d->max ) = mx;
+ getDur().writingInt( d->max ) = mx;
return true;
}
@@ -403,10 +403,10 @@ namespace mongo {
if ( details ) {
assert( !details->lastExtent.isNull() );
assert( !details->firstExtent.isNull() );
- dur::writingDiskLoc(e->xprev) = details->lastExtent;
- dur::writingDiskLoc(details->lastExtent.ext()->xnext) = eloc;
+ getDur().writingDiskLoc(e->xprev) = details->lastExtent;
+ getDur().writingDiskLoc(details->lastExtent.ext()->xnext) = eloc;
assert( !eloc.isNull() );
- dur::writingDiskLoc(details->lastExtent) = eloc;
+ getDur().writingDiskLoc(details->lastExtent) = eloc;
}
else {
ni->add_ns(ns, eloc, capped);
@@ -414,7 +414,7 @@ namespace mongo {
}
{
- NamespaceDetails *dw = dur::writing(details);
+ NamespaceDetails *dw = getDur().writing(details);
dw->lastExtentSize = e->length;
}
details->addDeletedRec(emptyLoc.drec(), emptyLoc);
@@ -438,12 +438,12 @@ namespace mongo {
}
int offset = header()->unused.getOfs();
- DataFileHeader *h = dur::writing(header());
+ DataFileHeader *h = getDur().writing(header());
h->unused.set( fileNo, offset + ExtentSize );
h->unusedLength -= ExtentSize;
loc.set(fileNo, offset);
Extent *e = _getExtent(loc);
- DiskLoc emptyLoc = dur::writing(e)->init(ns, ExtentSize, fileNo, offset);
+ DiskLoc emptyLoc = getDur().writing(e)->init(ns, ExtentSize, fileNo, offset);
addNewExtentToNamespace(ns, e, loc, emptyLoc, newCapped);
@@ -517,7 +517,7 @@ namespace mongo {
/*---------------------------------------------------------------------*/
DiskLoc Extent::reuse(const char *nsname) {
- return dur::writing(this)->_reuse(nsname);
+ return getDur().writing(this)->_reuse(nsname);
}
DiskLoc Extent::_reuse(const char *nsname) {
log(3) << "reset extent was:" << nsDiagnostic.toString() << " now:" << nsname << '\n';
@@ -534,7 +534,7 @@ namespace mongo {
int delRecLength = length - (_extentData - (char *) this);
DeletedRecord *empty = DataFileMgr::makeDeletedRecord(emptyLoc, delRecLength);//(DeletedRecord *) getRecord(emptyLoc);
- empty = dur::writing(empty);
+ empty = getDur().writing(empty);
empty->lengthWithHeaders = delRecLength;
empty->extentOfs = myLoc.getOfs();
empty->nextDeleted.Null();
@@ -557,7 +557,7 @@ namespace mongo {
emptyLoc.inc( (int) (_extentData-(char*)this) );
int l = _length - (_extentData - (char *) this);
- DeletedRecord *empty = dur::writing( DataFileMgr::makeDeletedRecord(emptyLoc, l) );
+ DeletedRecord *empty = getDur().writing( DataFileMgr::makeDeletedRecord(emptyLoc, l) );
empty->lengthWithHeaders = l;
empty->extentOfs = myLoc.getOfs();
return emptyLoc;
@@ -752,11 +752,11 @@ namespace mongo {
else {
DiskLoc a = freeExtents->firstExtent;
assert( a.ext()->xprev.isNull() );
- dur::writingDiskLoc( a.ext()->xprev ) = d->lastExtent;
- dur::writingDiskLoc( d->lastExtent.ext()->xnext ) = a;
- dur::writingDiskLoc( freeExtents->firstExtent ) = d->firstExtent;
- dur::writingDiskLoc( d->firstExtent ).setInvalid();
- dur::writingDiskLoc( d->lastExtent ).setInvalid();
+ getDur().writingDiskLoc( a.ext()->xprev ) = d->lastExtent;
+ getDur().writingDiskLoc( d->lastExtent.ext()->xnext ) = a;
+ getDur().writingDiskLoc( freeExtents->firstExtent ) = d->firstExtent;
+ getDur().writingDiskLoc( d->firstExtent ).setInvalid();
+ getDur().writingDiskLoc( d->lastExtent ).setInvalid();
}
}
@@ -844,14 +844,14 @@ namespace mongo {
/* remove ourself from the record next/prev chain */
{
if ( todelete->prevOfs != DiskLoc::NullOfs )
- dur::writingInt( todelete->getPrev(dl).rec()->nextOfs ) = todelete->nextOfs;
+ getDur().writingInt( todelete->getPrev(dl).rec()->nextOfs ) = todelete->nextOfs;
if ( todelete->nextOfs != DiskLoc::NullOfs )
- dur::writingInt( todelete->getNext(dl).rec()->prevOfs ) = todelete->prevOfs;
+ getDur().writingInt( todelete->getNext(dl).rec()->prevOfs ) = todelete->prevOfs;
}
/* remove ourself from extent pointers */
{
- Extent *e = dur::writing( todelete->myExtent(dl) );
+ Extent *e = getDur().writing( todelete->myExtent(dl) );
if ( e->firstRecord == dl ) {
if ( todelete->nextOfs == DiskLoc::NullOfs )
e->firstRecord.Null();
@@ -869,7 +869,7 @@ namespace mongo {
/* add to the free list */
{
{
- NamespaceDetails::Stats *s = dur::writing(&d->stats);
+ NamespaceDetails::Stats *s = getDur().writing(&d->stats);
s->datasize -= todelete->netLength();
s->nrecords--;
}
@@ -880,12 +880,12 @@ namespace mongo {
to this disk location. so an incorrectly done remove would cause
a lot of problems.
*/
- memset(dur::writingPtr(todelete, todelete->lengthWithHeaders), 0, todelete->lengthWithHeaders);
+ memset(getDur().writingPtr(todelete, todelete->lengthWithHeaders), 0, todelete->lengthWithHeaders);
}
else {
DEV {
unsigned long long *p = (unsigned long long *) todelete->data;
- *dur::writing(p) = 0;
+ *getDur().writing(p) = 0;
//DEV memset(todelete->data, 0, todelete->netLength()); // attempt to notice invalid reuse.
}
d->addDeletedRec((DeletedRecord*)todelete, dl);
@@ -1002,7 +1002,7 @@ namespace mongo {
// update in place
int sz = objNew.objsize();
- memcpy(dur::writingPtr(toupdate->data, sz), objNew.objdata(), sz);
+ memcpy(getDur().writingPtr(toupdate->data, sz), objNew.objdata(), sz);
return dl;
}
@@ -1092,7 +1092,7 @@ namespace mongo {
bool dropDups = idx.dropDups() || inDBRepair;
BSONObj order = idx.keyPattern();
- dur::writingDiskLoc(idx.head).Null();
+ getDur().writingDiskLoc(idx.head).Null();
if ( logLevel > 1 ) printMemInfo( "before index start" );
@@ -1343,7 +1343,7 @@ namespace mongo {
if ( d == 0 || (d->flags & NamespaceDetails::Flag_HaveIdIndex) )
return;
- *dur::writing(&d->flags) |= NamespaceDetails::Flag_HaveIdIndex;
+ *getDur().writing(&d->flags) |= NamespaceDetails::Flag_HaveIdIndex;
{
NamespaceDetails::IndexIterator i = d->ii();
@@ -1543,7 +1543,7 @@ namespace mongo {
Record *r = loc.rec();
{
assert( r->lengthWithHeaders >= lenWHdr );
- r = (Record*) dur::writingPtr(r, lenWHdr);
+ r = (Record*) getDur().writingPtr(r, lenWHdr);
if( addID ) {
/* a little effort was made here to avoid a double copy when we add an ID */
((int&)*r->data) = *((int*) obuf) + newId->size();
@@ -1559,7 +1559,7 @@ namespace mongo {
{
Extent *e = r->myExtent(loc);
if ( e->lastRecord.isNull() ) {
- Extent::FL *fl = dur::writing(e->fl());
+ Extent::FL *fl = getDur().writing(e->fl());
fl->firstRecord = fl->lastRecord = loc;
r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
}
@@ -1567,14 +1567,14 @@ namespace mongo {
Record *oldlast = e->lastRecord.rec();
r->prevOfs = e->lastRecord.getOfs();
r->nextOfs = DiskLoc::NullOfs;
- dur::writingInt(oldlast->nextOfs) = loc.getOfs();
- dur::writingDiskLoc(e->lastRecord) = loc;
+ getDur().writingInt(oldlast->nextOfs) = loc.getOfs();
+ getDur().writingDiskLoc(e->lastRecord) = loc;
}
}
/* durability todo : this could be a bit annoying / slow to record constantly */
{
- NamespaceDetails::Stats *s = dur::writing(&d->stats);
+ NamespaceDetails::Stats *s = getDur().writing(&d->stats);
s->datasize += r->netLength();
s->nrecords++;
}
@@ -1598,7 +1598,7 @@ namespace mongo {
int idxNo = tableToIndex->nIndexes;
IndexDetails& idx = tableToIndex->addIndex(tabletoidxns.c_str(), !background); // clear transient info caches so they refresh; increments nIndexes
- dur::writingDiskLoc(idx.info) = loc;
+ getDur().writingDiskLoc(idx.info) = loc;
try {
buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo, background);
} catch( DBException& e ) {
@@ -1678,24 +1678,24 @@ namespace mongo {
Extent *e = r->myExtent(loc);
if ( e->lastRecord.isNull() ) {
- Extent::FL *fl = dur::writing( e->fl() );
+ Extent::FL *fl = getDur().writing( e->fl() );
fl->firstRecord = fl->lastRecord = loc;
- Record::NP *np = dur::writing(r->np());
+ Record::NP *np = getDur().writing(r->np());
np->nextOfs = np->prevOfs = DiskLoc::NullOfs;
}
else {
Record *oldlast = e->lastRecord.rec();
- Record::NP *np = dur::writing(r->np());
+ Record::NP *np = getDur().writing(r->np());
np->prevOfs = e->lastRecord.getOfs();
np->nextOfs = DiskLoc::NullOfs;
- dur::writingInt( oldlast->nextOfs ) = loc.getOfs();
+ getDur().writingInt( oldlast->nextOfs ) = loc.getOfs();
e->lastRecord.writing() = loc;
}
/* todo: don't update for oplog? seems wasteful. */
{
- NamespaceDetails::Stats *s = dur::writing(&d->stats);
+ NamespaceDetails::Stats *s = getDur().writing(&d->stats);
s->datasize += r->netLength();
s->nrecords++;
}
diff --git a/db/pdfile.h b/db/pdfile.h
index ad95223671b..2ed2014754e 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -271,7 +271,7 @@ namespace mongo {
DiskLoc lastRecord;
};
/** often we want to update just the firstRecord and lastRecord fields.
- this helper is for that -- for use with dur::writing() method
+ this helper is for that -- for use with getDur().writing() method
*/
FL* fl() { return (FL*) &firstRecord; }
private:
@@ -312,7 +312,7 @@ namespace mongo {
if ( uninitialized() ) {
assert(filelength > 32768 );
assert( HeaderSize == 8192 );
- DataFileHeader *h = dur::writing(this);
+ DataFileHeader *h = getDur().writing(this);
h->fileLength = filelength;
h->version = VERSION;
h->versionMinor = VERSION_MINOR;
diff --git a/dbtests/btreetests.cpp b/dbtests/btreetests.cpp
index a1444ad5d9d..50e17ac64e2 100644
--- a/dbtests/btreetests.cpp
+++ b/dbtests/btreetests.cpp
@@ -410,8 +410,8 @@ namespace BtreeTests {
}
// too much work to try to make this happen through inserts and deletes
// we are intentionally manipulating the btree bucket directly here
- dur::writingDiskLoc( const_cast< DiskLoc& >( bt()->keyNode( 1 ).prevChildBucket ) ) = DiskLoc();
- dur::writingInt( const_cast< DiskLoc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
+ getDur().writingDiskLoc( const_cast< DiskLoc& >( bt()->keyNode( 1 ).prevChildBucket ) ) = DiskLoc();
+ getDur().writingInt( const_cast< DiskLoc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
BSONObj k = BSON( "a" << toInsert );
Base::insert( k );
}
@@ -542,7 +542,7 @@ namespace BtreeTests {
}
static void set( const DiskLoc &l, IndexDetails &id ) {
ArtificialTree::is( id.head )->deallocBucket( id.head, id );
- dur::writingDiskLoc(id.head) = l;
+ getDur().writingDiskLoc(id.head) = l;
}
static string expectedKey( const char *spec ) {
if ( spec[ 0 ] != '$' ) {
@@ -1449,7 +1449,7 @@ namespace BtreeTests {
void run() {
string ns = id().indexNamespace();
ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
- dur::writingInt( const_cast< DiskLoc& >( bt()->keyNode( 1 ).prevChildBucket.btree()->keyNode( 0 ).recordLoc ).GETOFS() ) |= 1; // make unused
+ getDur().writingInt( const_cast< DiskLoc& >( bt()->keyNode( 1 ).prevChildBucket.btree()->keyNode( 0 ).recordLoc ).GETOFS() ) |= 1; // make unused
int unused = 0;
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 1, unused );
diff --git a/dbtests/framework.cpp b/dbtests/framework.cpp
index 1fc023833bb..8c6595b79de 100644
--- a/dbtests/framework.cpp
+++ b/dbtests/framework.cpp
@@ -199,6 +199,7 @@ namespace mongo {
}
if( params.count("dur") ) {
+ enableDurability();
cmdLine.dur = true;
}
@@ -256,7 +257,7 @@ namespace mongo {
filter = params["filter"].as<string>();
}
- dur::startup();
+ getDur().startup();
int ret = run(suites,filter);
diff --git a/dbtests/namespacetests.cpp b/dbtests/namespacetests.cpp
index 5588bf7c2cd..170d7175637 100644
--- a/dbtests/namespacetests.cpp
+++ b/dbtests/namespacetests.cpp
@@ -620,7 +620,7 @@ namespace NamespaceTests {
return ns_;
}
NamespaceDetails *nsd() const {
- return dur::writing( nsdetails( ns() ) );
+ return getDur().writing( nsdetails( ns() ) );
}
static BSONObj bigObj() {
string as( 187, 'a' );
diff --git a/dbtests/pdfiletests.cpp b/dbtests/pdfiletests.cpp
index 1c4b3f3f6b7..772bc2e697c 100644
--- a/dbtests/pdfiletests.cpp
+++ b/dbtests/pdfiletests.cpp
@@ -71,7 +71,7 @@ namespace PdfileTests {
BSONObj o = b.done();
int len = o.objsize();
Extent *e = ext.ext();
- e = dur::writing(e);
+ e = getDur().writing(e);
int ofs;
if ( e->lastRecord.isNull() )
ofs = ext.getOfs() + ( e->_extentData - (char *)e );
@@ -79,7 +79,7 @@ namespace PdfileTests {
ofs = e->lastRecord.getOfs() + e->lastRecord.rec()->lengthWithHeaders;
DiskLoc dl( ext.a(), ofs );
Record *r = dl.rec();
- r = (Record*) dur::writingPtr(r, Record::HeaderSize + len);
+ r = (Record*) getDur().writingPtr(r, Record::HeaderSize + len);
r->lengthWithHeaders = Record::HeaderSize + len;
r->extentOfs = e->myLoc.getOfs();
r->nextOfs = DiskLoc::NullOfs;
@@ -88,7 +88,7 @@ namespace PdfileTests {
if ( e->firstRecord.isNull() )
e->firstRecord = dl;
else
- dur::writingInt(e->lastRecord.rec()->nextOfs) = ofs;
+ getDur().writingInt(e->lastRecord.rec()->nextOfs) = ofs;
e->lastRecord = dl;
return dl;
}
@@ -112,7 +112,7 @@ namespace PdfileTests {
class EmptyLooped : public Base {
virtual void prepare() {
- dur::writing(nsd())->capFirstNewRecord = DiskLoc();
+ getDur().writing(nsd())->capFirstNewRecord = DiskLoc();
}
virtual int count() const {
return 0;
@@ -121,7 +121,7 @@ namespace PdfileTests {
class EmptyMultiExtentLooped : public Base {
virtual void prepare() {
- dur::writing( nsd() ) ->capFirstNewRecord = DiskLoc();
+ getDur().writing( nsd() ) ->capFirstNewRecord = DiskLoc();
}
virtual int count() const {
return 0;
@@ -133,7 +133,7 @@ namespace PdfileTests {
class Single : public Base {
virtual void prepare() {
- dur::writing( nsd() )->capFirstNewRecord = insert( nsd()->capExtent, 0 );
+ getDur().writing( nsd() )->capFirstNewRecord = insert( nsd()->capExtent, 0 );
}
virtual int count() const {
return 1;
@@ -143,11 +143,11 @@ namespace PdfileTests {
class NewCapFirst : public Base {
virtual void prepare() {
DiskLoc x = insert( nsd()->capExtent, 0 );
- dur::debugCheckLastDeclaredWrite();
- dur::writing( nsd() )->capFirstNewRecord = x;
- dur::debugCheckLastDeclaredWrite();
+ getDur().debugCheckLastDeclaredWrite();
+ getDur().writing( nsd() )->capFirstNewRecord = x;
+ getDur().debugCheckLastDeclaredWrite();
insert( nsd()->capExtent, 1 );
- dur::debugCheckLastDeclaredWrite();
+ getDur().debugCheckLastDeclaredWrite();
}
virtual int count() const {
return 2;
diff --git a/util/hashtab.h b/util/hashtab.h
index 340d5b7d30d..4f25aa74318 100644
--- a/util/hashtab.h
+++ b/util/hashtab.h
@@ -129,7 +129,7 @@ namespace mongo {
int i = _find(k, found);
if ( i >= 0 && found ) {
Node* n = &nodes(i);
- n = dur::writing(n);
+ n = getDur().writing(n);
n->k.kill();
n->setUnused();
}
@@ -141,7 +141,7 @@ namespace mongo {
int i = _find(k, found);
if ( i < 0 )
return false;
- Node* n = dur::writing( &nodes(i) );
+ Node* n = getDur().writing( &nodes(i) );
if ( !found ) {
n->k = k;
n->hash = k.hash();