summaryrefslogtreecommitdiff
path: root/db
diff options
context:
space:
mode:
authorKristina <kristina@10gen.com>2011-04-14 17:36:05 -0400
committerKristina <kristina@10gen.com>2011-04-14 17:36:05 -0400
commit55cbefc22dfd2dc338990654deb71da48596c738 (patch)
treec674a651511bbae6bd6d4e7fe05c5cd0c982ab9e /db
parent260f7852722d5937900e40c362976ce2a71b105d (diff)
parent8e57783af6a01ab2304c47bb82bcbd9ea00dcf14 (diff)
downloadmongo-55cbefc22dfd2dc338990654deb71da48596c738.tar.gz
Merge branch 'master' of github.com:mongodb/mongo
Diffstat (limited to 'db')
-rw-r--r--db/cmdline.cpp14
-rw-r--r--db/cmdline.h14
-rw-r--r--db/commands/distinct.cpp2
-rw-r--r--db/db.cpp8
-rw-r--r--db/dbcommands_admin.cpp55
-rw-r--r--db/geo/2d.cpp148
-rw-r--r--db/matcher.cpp2
-rw-r--r--db/namespace.cpp5
-rw-r--r--db/namespace.h4
-rw-r--r--db/oplog.cpp3
-rw-r--r--db/pdfile.cpp45
-rw-r--r--db/pdfile.h9
12 files changed, 201 insertions, 108 deletions
diff --git a/db/cmdline.cpp b/db/cmdline.cpp
index 900a7829c94..6568a322c06 100644
--- a/db/cmdline.cpp
+++ b/db/cmdline.cpp
@@ -35,6 +35,20 @@ namespace mongo {
string getHostNameCached();
BSONArray argvArray;
+ CmdLine::CmdLine() :
+ port(DefaultDBPort), rest(false), jsonp(false), quiet(false), noTableScan(false), prealloc(true), smallfiles(sizeof(int*) == 4),
+ configsvr(false),
+ quota(false), quotaFiles(8), cpu(false), durOptions(0), oplogSize(0), defaultProfile(0), slowMS(100), pretouch(0), moveParanoia( true ),
+ syncdelay(60), socket("/tmp")
+ {
+ // default may change for this later.
+#if defined(_DURABLEDEFAULTON)
+ dur = true;
+#else
+ dur = false;
+#endif
+ }
+
void CmdLine::addGlobalOptions( boost::program_options::options_description& general ,
boost::program_options::options_description& hidden ) {
/* support for -vv -vvvv etc. */
diff --git a/db/cmdline.h b/db/cmdline.h
index e8055736f86..a239225fee9 100644
--- a/db/cmdline.h
+++ b/db/cmdline.h
@@ -26,17 +26,7 @@ namespace mongo {
/* concurrency: OK/READ */
struct CmdLine {
- CmdLine() :
- port(DefaultDBPort), rest(false), jsonp(false), quiet(false), noTableScan(false), prealloc(true), smallfiles(sizeof(int*) == 4),
- quota(false), quotaFiles(8), cpu(false), durOptions(0), oplogSize(0), defaultProfile(0), slowMS(100), pretouch(0), moveParanoia( true ),
- syncdelay(60), socket("/tmp") {
- // default may change for this later.
-#if defined(_DURABLEDEFAULTON)
- dur = true;
-#else
- dur = false;
-#endif
- }
+ CmdLine();
string binaryName; // mongod or mongos
string cwd; // cwd of when process started
@@ -75,6 +65,8 @@ namespace mongo {
bool prealloc; // --noprealloc no preallocation of data files
bool smallfiles; // --smallfiles allocate smaller data files
+ bool configsvr; // --configsvr
+
bool quota; // --quota
int quotaFiles; // --quotaFiles
bool cpu; // --cpu show cpu time periodically
diff --git a/db/commands/distinct.cpp b/db/commands/distinct.cpp
index a71fbc244b0..4cfc8565d84 100644
--- a/db/commands/distinct.cpp
+++ b/db/commands/distinct.cpp
@@ -111,7 +111,7 @@ namespace mongo {
int now = bb.len();
- uassert(10044, "distinct too big, 4mb cap", ( now + e.size() + 1024 ) < bufSize );
+ uassert(10044, "distinct too big, 16mb cap", ( now + e.size() + 1024 ) < bufSize );
arr.append( e );
BSONElement x( start + now );
diff --git a/db/db.cpp b/db/db.cpp
index 62749774b49..01a61ad07c4 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -869,8 +869,13 @@ int main(int argc, char* argv[]) {
if( params.count("configsvr") ) {
cmdLine.port = CmdLine::ConfigServerPort;
}
- if( params.count("shardsvr") )
+ if( params.count("shardsvr") ) {
+ if( params.count("configsvr") ) {
+ log() << "can't do --shardsvr and --configsvr at the same time" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
cmdLine.port = CmdLine::ShardServerPort;
+ }
}
else {
if ( cmdLine.port <= 0 || cmdLine.port > 65535 ) {
@@ -879,6 +884,7 @@ int main(int argc, char* argv[]) {
}
}
if ( params.count("configsvr" ) ) {
+ cmdLine.configsvr = true;
if (cmdLine.usingReplSets() || replSettings.master || replSettings.slave) {
log() << "replication should not be enabled on a config server" << endl;
::exit(-1);
diff --git a/db/dbcommands_admin.cpp b/db/dbcommands_admin.cpp
index 4b9696d993a..c8e421c0080 100644
--- a/db/dbcommands_admin.cpp
+++ b/db/dbcommands_admin.cpp
@@ -169,33 +169,25 @@ namespace mongo {
}
result.append( "ns", ns );
- result.append( "result" , validateNS( ns.c_str() , d, cmdObj, result) );
+ validateNS( ns.c_str() , d, cmdObj, result);
return 1;
}
private:
- // For historical reasons, all info is available both in a string field (returned) as well as normal fields
- string validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
+ void validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
bool scanData = true;
- if( cmdObj.hasElement("scandata") && !cmdObj.getBoolField("scandata") )
+ if( !cmdObj["scandata"].trueValue() )
scanData = false;
bool full = cmdObj["full"].trueValue();
bool valid = true;
BSONArrayBuilder errors; // explanation(s) for why valid = false
- stringstream ss;
- ss << "\nvalidate\n";
- //ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
if ( d->capped ){
- ss << " capped:" << d->capped << " max:" << d->max << '\n';
result.append("capped", d->capped);
result.append("max", d->max);
}
- ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString()<< '\n';
- ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString() << '\n';
-
result.append("firstExtent", str::stream() << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
result.append( "lastExtent", str::stream() << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString());
try {
@@ -211,18 +203,13 @@ namespace mongo {
ne++;
killCurrentOp.checkForInterrupt();
}
- ss << " # extents:" << ne << '\n';
result.append("extentCount", ne);
}
catch (...) {
valid=false;
- ss << " extent asserted ";
errors << "extent asserted";
}
- ss << " datasize?:" << d->stats.datasize << " nrecords?:" << d->stats.nrecords << " lastExtentSize:" << d->lastExtentSize << '\n';
- ss << " padding:" << d->paddingFactor << '\n';
-
result.appendNumber("datasize", d->stats.datasize);
result.appendNumber("nrecords", d->stats.nrecords);
result.appendNumber("lastExtentSize", d->lastExtentSize);
@@ -231,13 +218,12 @@ namespace mongo {
try {
try {
- ss << " first extent:\n";
- d->firstExtent.ext()->dump(ss); //TODO: should this be in output object?
+ result.append("firstExtentDetails", d->firstExtent.ext()->dump());
+
valid = valid && d->firstExtent.ext()->validates() &&
d->firstExtent.ext()->xprev.isNull();
}
catch (...) {
- ss << "\n exception firstextent\n" << endl;
errors << "exception firstextent";
valid = false;
}
@@ -293,38 +279,27 @@ namespace mongo {
c->advance();
}
if ( d->capped && !d->capLooped() ) {
- ss << " capped outOfOrder:" << outOfOrder;
result.append("cappedOutOfOrder", outOfOrder);
if ( outOfOrder > 1 ) {
valid = false;
- ss << " ???";
errors << "too many out of order records";
}
- else ss << " (OK)";
- ss << '\n';
}
- ss << " " << n << " objects found, nobj:" << d->stats.nrecords << '\n';
result.append("objectsFound", n);
if (full) {
- ss << " " << nInvalid << " invalid BSON objects found\n";
result.append("invalidObjects", nInvalid);
}
- ss << " " << len << " bytes data w/headers\n";
- ss << " " << nlen << " bytes data wout/headers\n";
-
result.appendNumber("bytesWithHeaders", len);
result.appendNumber("bytesWithoutHeaders", nlen);
}
- ss << " deletedList: ";
BSONArrayBuilder deletedListArray;
for ( int i = 0; i < Buckets; i++ ) {
- ss << (d->deletedList[i].isNull() ? '0' : '1');
deletedListArray << d->deletedList[i].isNull();
}
- ss << endl;
+
int ndel = 0;
long long delSize = 0;
int incorrect = 0;
@@ -346,8 +321,6 @@ namespace mongo {
}
if ( loc.a() <= 0 || strstr(ns, "hudsonSmall") == 0 ) {
- ss << " ?bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k << endl;
-
string err (str::stream() << "bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k);
errors << err;
@@ -364,60 +337,52 @@ namespace mongo {
}
}
catch (...) {
- ss <<" ?exception in deleted chain for bucket " << i << endl;
errors << ("exception in deleted chain for bucket " + BSONObjBuilder::numStr(i));
valid = false;
}
}
- ss << " deleted: n: " << ndel << " size: " << delSize << endl;
result.appendNumber("deletedCount", ndel);
result.appendNumber("deletedSize", delSize);
if ( incorrect ) {
- ss << " ?corrupt: " << incorrect << " records from datafile are in deleted list\n";
errors << (BSONObjBuilder::numStr(incorrect) + " records from datafile are in deleted list");
valid = false;
}
int idxn = 0;
try {
- ss << " nIndexes:" << d->nIndexes << endl;
result.append("nIndexes", d->nIndexes);
BSONObjBuilder indexes; // not using subObjStart to be exception safe
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
IndexDetails& id = i.next();
long long keys = id.head.btree()->fullValidate(id.head, id.keyPattern());
- ss << " " << id.indexNamespace() << " keys:" << keys << endl;
indexes.appendNumber(id.indexNamespace(), keys);
}
result.append("keysPerIndex", indexes.done());
}
catch (...) {
- ss << "\n exception during index validate idxn:" << idxn << endl;
errors << ("exception during index validate idxn " + BSONObjBuilder::numStr(idxn));
valid=false;
}
}
catch (AssertionException) {
- ss << "\n exception during validate\n" << endl;
errors << "exception during validate";
valid = false;
}
- if ( !valid )
- ss << " ns corrupt, requires repair\n";
-
result.appendBool("valid", valid);
result.append("errors", errors.arr());
if ( !full ){
- ss << " warning: Some checks omitted for speed.\n use {full:true} option to do more thorough scan.\n";
result.append("warning", "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
}
- return ss.str();
+ if ( !valid ) {
+ result.append("advice", "ns corrupt, requires repair");
+ }
+
}
} validateCmd;
diff --git a/db/geo/2d.cpp b/db/geo/2d.cpp
index 5f8343a54e3..594232da63a 100644
--- a/db/geo/2d.cpp
+++ b/db/geo/2d.cpp
@@ -292,7 +292,7 @@ namespace mongo {
}
GeoHash hash( const Point& p ) const {
- return hash( p._x, p._y );
+ return hash( p._x, p._y );
}
GeoHash hash( double x , double y ) const {
@@ -470,7 +470,7 @@ namespace mongo {
}
double maxDim() const {
- return max( _max._x - _min._x, _max._y - _min._y );
+ return max( _max._x - _min._x, _max._y - _min._y );
}
Point center() const {
@@ -519,7 +519,7 @@ namespace mongo {
_points.push_back( p );
}
- int size( void ) {
+ int size( void ) const {
return _points.size();
}
@@ -528,12 +528,86 @@ namespace mongo {
*
* The algorithm uses a ray casting method.
*/
- bool contains( Point &p ) {
+ bool contains( const Point& p ) const {
+ return contains( p, 0 ) > 0;
+ }
+
+ int contains( const Point &p, double fudge ) const {
+
+ Box fudgeBox( Point( p._x - fudge, p._y - fudge ), Point( p._x + fudge, p._y + fudge ) );
int counter = 0;
Point p1 = _points[0];
for ( int i = 1; i <= size(); i++ ) {
Point p2 = _points[i % size()];
+
+ GEODEBUG( "Doing intersection check of " << fudgeBox << " with seg " << p1 << " to " << p2 );
+
+ // We need to check whether or not this segment intersects our error box
+ if( fudge > 0 &&
+ // Points not too far below box
+ fudgeBox._min._y <= std::max( p1._y, p2._y ) &&
+ // Points not too far above box
+ fudgeBox._max._y >= std::min( p1._y, p2._y ) &&
+ // Points not too far to left of box
+ fudgeBox._min._x <= std::max( p1._x, p2._x ) &&
+ // Points not too far to right of box
+ fudgeBox._max._x >= std::min( p1._x, p2._x ) ) {
+
+ GEODEBUG( "Doing detailed check" );
+
+ // If our box contains one or more of these points, we need to do an exact check.
+ if( fudgeBox.inside(p1) ) {
+ GEODEBUG( "Point 1 inside" );
+ return 0;
+ }
+ if( fudgeBox.inside(p2) ) {
+ GEODEBUG( "Point 2 inside" );
+ return 0;
+ }
+
+ // Do intersection check for vertical sides
+ if ( p1._y != p2._y ) {
+
+ double invSlope = ( p2._x - p1._x ) / ( p2._y - p1._y );
+
+ double xintersT = ( fudgeBox._max._y - p1._y ) * invSlope + p1._x;
+ if( fudgeBox._min._x <= xintersT && fudgeBox._max._x >= xintersT ) {
+ GEODEBUG( "Top intersection @ " << xintersT );
+ return 0;
+ }
+
+ double xintersB = ( fudgeBox._min._y - p1._y ) * invSlope + p1._x;
+ if( fudgeBox._min._x <= xintersB && fudgeBox._max._x >= xintersB ) {
+ GEODEBUG( "Bottom intersection @ " << xintersB );
+ return 0;
+ }
+
+ }
+
+ // Do intersection check for horizontal sides
+ if( p1._x != p2._x ) {
+
+ double slope = ( p2._y - p1._y ) / ( p2._x - p1._x );
+
+ double yintersR = ( p1._x - fudgeBox._max._x ) * slope + p1._y;
+ if( fudgeBox._min._y <= yintersR && fudgeBox._max._y >= yintersR ) {
+ GEODEBUG( "Right intersection @ " << yintersR );
+ return 0;
+ }
+
+ double yintersL = ( p1._x - fudgeBox._min._x ) * slope + p1._y;
+ if( fudgeBox._min._y <= yintersL && fudgeBox._max._y >= yintersL ) {
+ GEODEBUG( "Left intersection @ " << yintersL );
+ return 0;
+ }
+
+ }
+
+ }
+
+ // Normal intersection test.
+ // TODO: Invert these for clearer logic?
if ( p._y > std::min( p1._y, p2._y ) ) {
if ( p._y <= std::max( p1._y, p2._y ) ) {
if ( p._x <= std::max( p1._x, p2._x ) ) {
@@ -546,14 +620,15 @@ namespace mongo {
}
}
}
+
p1 = p2;
}
if ( counter % 2 == 0 ) {
- return false;
+ return -1;
}
else {
- return true;
+ return 1;
}
}
@@ -597,17 +672,17 @@ namespace mongo {
Box bounds( void ) {
- // TODO: Cache this
+ // TODO: Cache this
- _bounds._max = _points[0];
- _bounds._min = _points[0];
+ _bounds._max = _points[0];
+ _bounds._min = _points[0];
for ( int i = 1; i < size(); i++ ) {
- _bounds._max._x = max( _bounds._max._x, _points[i]._x );
- _bounds._max._y = max( _bounds._max._y, _points[i]._y );
- _bounds._min._x = min( _bounds._min._x, _points[i]._x );
- _bounds._min._y = min( _bounds._min._y, _points[i]._y );
+ _bounds._max._x = max( _bounds._max._x, _points[i]._x );
+ _bounds._max._y = max( _bounds._max._y, _points[i]._y );
+ _bounds._min._x = min( _bounds._min._x, _points[i]._x );
+ _bounds._min._y = min( _bounds._min._y, _points[i]._y );
}
@@ -2167,28 +2242,53 @@ namespace mongo {
// Whether the current box width is big enough for our search area
virtual bool fitsInBox( double width ) {
- return _maxDim <= width;
+ return _maxDim <= width;
}
// Whether the current box overlaps our search area
virtual bool intersectsBox( Box& cur ) {
- return _bounds.intersects( cur );
+ return _bounds.intersects( cur );
}
virtual bool checkDistance( const KeyNode& node, double& d ) {
- Point p = Point( _g , GeoHash( node.key.firstElement() ) );
+ Point p( _g, GeoHash( node.key.firstElement() ) );
+
+ int in = _poly.contains( p, _g->_error );
+ if( in != 0 ) {
- // Use the point in polygon algorihtm to see if the point
- // is contained in the polygon.
- bool in = _poly.contains( p );
- if ( in ) {
- GEODEBUG( "Point: [" << p._x << ", " << p._y << "] in polygon" );
+ if ( in > 0 ) {
+ GEODEBUG( "Point: [" << p._x << ", " << p._y << "] approx in polygon" );
+ }
+ else {
+ GEODEBUG( "Point: [" << p._x << ", " << p._y << "] approx not in polygon" );
+ }
+
+ if( in != 0 ) return in > 0;
}
- else {
- GEODEBUG( "Point: [" << p._x << ", " << p._y << "] not in polygon" );
+
+ // Do exact check, since the approximate check was inconclusive
+ vector< BSONObj > locs;
+ _g->getKeys( node.recordLoc.obj(), locs );
+
+ for( vector< BSONObj >::iterator i = locs.begin(); i != locs.end(); ++i ) {
+
+ Point p( *i );
+
+ // Use the point in polygon algorithm to see if the point
+ // is contained in the polygon.
+ bool in = _poly.contains( p );
+ if ( in ) {
+ GEODEBUG( "Point: [" << p._x << ", " << p._y << "] exactly in polygon" );
+ }
+ else {
+ GEODEBUG( "Point: [" << p._x << ", " << p._y << "] exactly not in polygon" );
+ }
+ if( in ) return in;
+
}
- return in;
+
+ return false;
}
private:
diff --git a/db/matcher.cpp b/db/matcher.cpp
index cf85819295c..7def3aac10a 100644
--- a/db/matcher.cpp
+++ b/db/matcher.cpp
@@ -40,6 +40,8 @@ namespace {
options.set_multiline(true);
else if ( *flags == 'x' )
options.set_extended(true);
+ else if ( *flags == 's' )
+ options.set_dotall(true);
flags++;
}
return options;
diff --git a/db/namespace.cpp b/db/namespace.cpp
index c90888a5417..d8559226301 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -620,7 +620,7 @@ namespace mongo {
options: { capped : ..., size : ... }
*/
void addNewNamespaceToCatalog(const char *ns, const BSONObj *options = 0) {
- log(1) << "New namespace: " << ns << '\n';
+ LOG(1) << "New namespace: " << ns << endl;
if ( strstr(ns, "system.namespaces") ) {
// system.namespaces holds all the others, so it is not explicitly listed in the catalog.
// TODO: fix above should not be strstr!
@@ -636,6 +636,9 @@ namespace mongo {
char database[256];
nsToDatabase(ns, database);
string s = database;
+ if( cmdLine.configsvr && (s != "config" && s != "admin") ) {
+ uasserted(14036, "can't create user databases on a --configsvr instance");
+ }
s += ".system.namespaces";
theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), true);
}
diff --git a/db/namespace.h b/db/namespace.h
index 2a357265b4f..64f5c4723b6 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -93,8 +93,8 @@ namespace mongo {
namespace mongo {
- /** @return true if a client can modify this namespace
- things like *.system.users
+ /** @return true if a client can modify this namespace even though it is under ".system."
+ For example <dbname>.system.users is ok for regular clients to update.
@param write used when .system.js
*/
bool legalClientSystemNS( const string& ns , bool write );
diff --git a/db/oplog.cpp b/db/oplog.cpp
index 1557cbdd822..c73f6293e2d 100644
--- a/db/oplog.cpp
+++ b/db/oplog.cpp
@@ -23,6 +23,7 @@
#include "commands.h"
#include "repl/rs.h"
#include "stats/counters.h"
+#include "../util/file.h"
namespace mongo {
@@ -361,7 +362,7 @@ namespace mongo {
sz = (256-64) * 1000 * 1000;
#else
sz = 990.0 * 1000 * 1000;
- boost::intmax_t free = freeSpace(); //-1 if call not supported.
+ boost::intmax_t free = File::freeSpace(dbpath); //-1 if call not supported.
double fivePct = free * 0.05;
if ( fivePct > sz )
sz = fivePct;
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 70c15d1606d..ef6f64b1090 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -30,6 +30,7 @@ _ disallow system* manipulations from the database.
#include "../util/hashtab.h"
#include "../util/file_allocator.h"
#include "../util/processinfo.h"
+#include "../util/file.h"
#include "btree.h"
#include <algorithm>
#include <list>
@@ -485,11 +486,15 @@ namespace mongo {
low = (int) (approxSize * 0.8);
high = (int) (approxSize * 1.4);
}
- if( high < 0 ) high = approxSize;
+ if( high <= 0 ) {
+ // overflowed
+ high = max(approxSize, Extent::maxSize());
+ }
int n = 0;
Extent *best = 0;
int bestDiff = 0x7fffffff;
{
+ Timer t;
DiskLoc L = f->firstExtent;
while( !L.isNull() ) {
Extent * e = L.ext();
@@ -498,13 +503,30 @@ namespace mongo {
if( diff < bestDiff ) {
bestDiff = diff;
best = e;
- if( diff == 0 )
+ if( ((double) diff) / approxSize < 0.1 ) {
+ // close enough
+ break;
+ }
+ if( t.seconds() >= 2 ) {
+ // have spent lots of time in write lock, and we are in [low,high], so close enough
+ // could come into play if extent freelist is very long
break;
+ }
+ }
+ else {
+ OCCASIONALLY {
+ if( high < 64 * 1024 && t.seconds() >= 2 ) {
+ // be less picky if it is taking a long time
+ high = 64 * 1024;
+ }
+ }
}
}
L = e->xnext;
++n;
-
+ }
+ if( t.seconds() >= 10 ) {
+ log() << "warning: slow scan in allocFromFreeList (in write lock)" << endl;
}
}
OCCASIONALLY if( n > 512 ) log() << "warning: newExtent " << n << " scanned\n";
@@ -1932,21 +1954,6 @@ namespace mongo {
return sa.size();
}
-#if !defined(_WIN32)
-} // namespace mongo
-#include <sys/statvfs.h>
-namespace mongo {
-#endif
- boost::intmax_t freeSpace ( const string &path ) {
-#if !defined(_WIN32)
- struct statvfs info;
- assert( !statvfs( path.c_str() , &info ) );
- return boost::intmax_t( info.f_bavail ) * info.f_frsize;
-#else
- return -1;
-#endif
- }
-
bool repairDatabase( string dbNameS , string &errmsg,
bool preserveClonedFilesOnFailure, bool backupOriginalFiles ) {
doingRepair dr;
@@ -1966,7 +1973,7 @@ namespace mongo {
getDur().syncDataAndTruncateJournal(); // Must be done before and after repair
boost::intmax_t totalSize = dbSize( dbName );
- boost::intmax_t freeSize = freeSpace( repairpath );
+ boost::intmax_t freeSize = File::freeSpace(repairpath);
if ( freeSize > -1 && freeSize < totalSize ) {
stringstream ss;
ss << "Cannot repair database " << dbName << " having size: " << totalSize
diff --git a/db/pdfile.h b/db/pdfile.h
index 4b58daebec2..3421e345a46 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -52,9 +52,6 @@ namespace mongo {
bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication, bool *deferIdIndex = 0);
shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc=DiskLoc());
- // -1 if library unavailable.
- boost::intmax_t freeSpace( const string &path = dbpath );
-
bool isValidNS( const StringData& ns );
/*---------------------------------------------------------------------*/
@@ -247,6 +244,12 @@ namespace mongo {
length >= 0 && !myLoc.isNull();
}
+ BSONObj dump() {
+ return BSON( "loc" << myLoc.toString() << "xnext" << xnext.toString() << "xprev" << xprev.toString()
+ << "nsdiag" << nsDiagnostic.toString()
+ << "size" << length << "firstRecord" << firstRecord.toString() << "lastRecord" << lastRecord.toString());
+ }
+
void dump(iostream& s) {
s << " loc:" << myLoc.toString() << " xnext:" << xnext.toString() << " xprev:" << xprev.toString() << '\n';
s << " nsdiag:" << nsDiagnostic.toString() << '\n';