summary | refs | log | tree | commit | diff
path: root/s/d_migrate.cpp
diff options
context:
space:
mode:
author: Mathias Stearn <mathias@10gen.com> 2011-05-11 15:05:00 -0400
committer: Mathias Stearn <mathias@10gen.com> 2011-05-11 15:19:06 -0400
commit: bc1d98cd0535ba48ef424aa4d53f9a2e234fa9b0 (patch)
tree: cfe7ea13f491155228fc05d49d8a1ecf5c9dcd34 /s/d_migrate.cpp
parent: 1e83899c0637911d8ed5531fd483eef97f3ef8eb (diff)
download: mongo-bc1d98cd0535ba48ef424aa4d53f9a2e234fa9b0.tar.gz
Yield lock while building array of objects to migrate
Diffstat (limited to 's/d_migrate.cpp')
-rw-r--r-- s/d_migrate.cpp | 46
1 file changed, 31 insertions, 15 deletions
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index 09700c9b1dc..627bd75c124 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -494,29 +494,45 @@ namespace mongo {
return false;
}
- readlock l( _ns );
- Client::Context ctx( _ns );
+ ElapsedTracker tracker (128, 10); // same as ClientCursor::_yieldSometimesTracker
- NamespaceDetails *d = nsdetails( _ns.c_str() );
- assert( d );
+ int allocSize;
+ {
+ readlock l(_ns);
+ Client::Context ctx( _ns );
+ NamespaceDetails *d = nsdetails( _ns.c_str() );
+ assert( d );
+ allocSize = std::min(BSONObjMaxUserSize, (int)((12 + d->averageObjectSize()) * _cloneLocs.size()));
+ }
+ BSONArrayBuilder a (allocSize);
+
+ bool keepGoing = true;
+ while (keepGoing && !_cloneLocs.empty()){
+ readlock l( _ns );
+ Client::Context ctx( _ns );
- BSONArrayBuilder a( std::min( BSONObjMaxUserSize , (int)( ( 12 + d->averageObjectSize() )* _cloneLocs.size() ) ) );
+ set<DiskLoc>::iterator i = _cloneLocs.begin();
+ for ( ; i!=_cloneLocs.end(); ++i ) {
+ if (tracker.ping()) // should I yield?
+ break;
- set<DiskLoc>::iterator i = _cloneLocs.begin();
- for ( ; i!=_cloneLocs.end(); ++i ) {
- DiskLoc dl = *i;
- BSONObj o = dl.obj();
+ DiskLoc dl = *i;
+ BSONObj o = dl.obj();
- // use the builder size instead of accumulating 'o's size so that we take into consideration
- // the overhead of BSONArray indices
- if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
- break;
+ // use the builder size instead of accumulating 'o's size so that we take into consideration
+ // the overhead of BSONArray indices
+ if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
+ keepGoing = false; // break out of outer while loop
+ break;
+ }
+
+ a.append( o );
}
- a.append( o );
+
+ _cloneLocs.erase( _cloneLocs.begin() , i );
}
result.appendArray( "objects" , a.arr() );
- _cloneLocs.erase( _cloneLocs.begin() , i );
return true;
}