Diffstat (limited to 'db')
-rw-r--r--  db/btree.cpp | 1285
-rw-r--r--  db/btree.h | 356
-rw-r--r--  db/btreecursor.cpp | 314
-rw-r--r--  db/clientcursor.cpp | 288
-rw-r--r--  db/clientcursor.h | 96
-rw-r--r--  db/cloner.cpp | 206
-rw-r--r--  db/commands.cpp | 42
-rw-r--r--  db/commands.h | 24
-rw-r--r--  db/cursor.h | 249
-rw-r--r--  db/database.h | 134
-rw-r--r--  db/db.cpp | 568
-rw-r--r--  db/db.h | 133
-rw-r--r--  db/dbcommands.cpp | 559
-rw-r--r--  db/dbeval.cpp | 104
-rw-r--r--  db/dbhelpers.cpp | 14
-rw-r--r--  db/dbhelpers.h | 24
-rw-r--r--  db/dbinfo.cpp | 16
-rw-r--r--  db/dbinfo.h | 28
-rw-r--r--  db/dbmessage.h | 158
-rw-r--r--  db/dbwebserver.cpp | 68
-rw-r--r--  db/instance.cpp | 722
-rw-r--r--  db/instance.h | 70
-rw-r--r--  db/introspect.cpp | 42
-rw-r--r--  db/introspect.h | 58
-rw-r--r--  db/javajs.cpp | 916
-rw-r--r--  db/javajs.h | 269
-rw-r--r--  db/javatest.cpp | 12
-rw-r--r--  db/jsobj.cpp | 937
-rw-r--r--  db/jsobj.h | 993
-rw-r--r--  db/json.cpp | 89
-rw-r--r--  db/json.h | 6
-rw-r--r--  db/matcher.cpp | 674
-rw-r--r--  db/matcher.h | 119
-rw-r--r--  db/minilex.h | 176
-rw-r--r--  db/namespace.cpp | 545
-rw-r--r--  db/namespace.h | 450
-rw-r--r--  db/pdfile.cpp | 1518
-rw-r--r--  db/pdfile.h | 484
-rw-r--r--  db/query.cpp | 1246
-rw-r--r--  db/query.h | 18
-rw-r--r--  db/queryoptimizer.cpp | 14
-rw-r--r--  db/queryoptimizer.h | 18
-rw-r--r--  db/repl.cpp | 1172
-rw-r--r--  db/repl.h | 202
-rw-r--r--  db/replset.h | 72
-rw-r--r--  db/resource.h | 8
-rw-r--r--  db/scanandorder.h | 183
-rw-r--r--  db/storage.h | 175
-rw-r--r--  db/tests.cpp | 79
49 files changed, 8184 insertions(+), 7749 deletions(-)
diff --git a/db/btree.cpp b/db/btree.cpp
index eca7b8f315c..5c652a71965 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -29,774 +29,783 @@ extern int otherTraceLevel;
int split_debug = 0;
int insert_debug = 0;
-KeyNode::KeyNode(BucketBasics& bb, _KeyNode &k) :
- prevChildBucket(k.prevChildBucket),
- recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
+KeyNode::KeyNode(BucketBasics& bb, _KeyNode &k) :
+ prevChildBucket(k.prevChildBucket),
+ recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
{ }
/* BucketBasics --------------------------------------------------- */
int BucketBasics::Size() const {
- assert( _Size == BucketSize );
- return _Size;
+ assert( _Size == BucketSize );
+ return _Size;
+}
+inline void BucketBasics::setNotPacked() {
+ flags &= ~Packed;
+}
+inline void BucketBasics::setPacked() {
+ flags |= Packed;
}
-inline void BucketBasics::setNotPacked() { flags &= ~Packed; }
-inline void BucketBasics::setPacked() { flags |= Packed; }
void BucketBasics::_shape(int level, stringstream& ss) {
- for( int i = 0; i < level; i++ ) ss << ' ';
- ss << "*\n";
- for( int i = 0; i < n; i++ )
- if( !k(i).prevChildBucket.isNull() )
- k(i).prevChildBucket.btree()->_shape(level+1,ss);
- if( !nextChild.isNull() )
- nextChild.btree()->_shape(level+1,ss);
+ for ( int i = 0; i < level; i++ ) ss << ' ';
+ ss << "*\n";
+ for ( int i = 0; i < n; i++ )
+ if ( !k(i).prevChildBucket.isNull() )
+ k(i).prevChildBucket.btree()->_shape(level+1,ss);
+ if ( !nextChild.isNull() )
+ nextChild.btree()->_shape(level+1,ss);
}
int bt_fv=0;
int bt_dmp=0;
-void BucketBasics::dumpTree(DiskLoc thisLoc) {
- bt_dmp=1;
- fullValidate(thisLoc);
- bt_dmp=0;
+void BucketBasics::dumpTree(DiskLoc thisLoc) {
+ bt_dmp=1;
+ fullValidate(thisLoc);
+ bt_dmp=0;
}
-int BucketBasics::fullValidate(const DiskLoc& thisLoc) {
- assertValid(true);
-// if( bt_fv==0 )
-// return;
-
- if( bt_dmp ) {
- cout << thisLoc.toString() << ' ';
- ((BtreeBucket *) this)->dump();
- }
-
- // keycount
- int kc = 0;
-
- for( int i = 0; i < n; i++ ) {
- _KeyNode& kn = k(i);
-
- if( kn.isUsed() ) kc++;
- if( !kn.prevChildBucket.isNull() ) {
- DiskLoc left = kn.prevChildBucket;
- BtreeBucket *b = left.btree();
- wassert( b->parent == thisLoc );
- kc += b->fullValidate(kn.prevChildBucket);
- }
- }
- if( !nextChild.isNull() ) {
- BtreeBucket *b = nextChild.btree();
- wassert( b->parent == thisLoc );
- kc += b->fullValidate(nextChild);
- }
-
- return kc;
+int BucketBasics::fullValidate(const DiskLoc& thisLoc) {
+ assertValid(true);
+// if( bt_fv==0 )
+// return;
+
+ if ( bt_dmp ) {
+ cout << thisLoc.toString() << ' ';
+ ((BtreeBucket *) this)->dump();
+ }
+
+ // keycount
+ int kc = 0;
+
+ for ( int i = 0; i < n; i++ ) {
+ _KeyNode& kn = k(i);
+
+ if ( kn.isUsed() ) kc++;
+ if ( !kn.prevChildBucket.isNull() ) {
+ DiskLoc left = kn.prevChildBucket;
+ BtreeBucket *b = left.btree();
+ wassert( b->parent == thisLoc );
+ kc += b->fullValidate(kn.prevChildBucket);
+ }
+ }
+ if ( !nextChild.isNull() ) {
+ BtreeBucket *b = nextChild.btree();
+ wassert( b->parent == thisLoc );
+ kc += b->fullValidate(nextChild);
+ }
+
+ return kc;
}
int nDumped = 0;
-void BucketBasics::assertValid(bool force) {
- if( !debug && !force )
- return;
- wassert( n >= 0 && n < Size() );
- wassert( emptySize >= 0 && emptySize < BucketSize );
- wassert( topSize >= n && topSize <= BucketSize );
- DEV {
- // slow:
- for( int i = 0; i < n-1; i++ ) {
- BSONObj k1 = keyNode(i).key;
- BSONObj k2 = keyNode(i+1).key;
- int z = k1.woCompare(k2); //OK
- if( z > 0 ) {
- cout << "ERROR: btree key order corrupt. Keys:" << endl;
- if( ++nDumped < 5 ) {
- for( int j = 0; j < n; j++ ) {
- cout << " " << keyNode(j).key.toString() << endl;
- }
- ((BtreeBucket *) this)->dump();
- }
- wassert(false);
- break;
- }
- else if( z == 0 ) {
- if( !(k(i).recordLoc < k(i+1).recordLoc) ) {
- cout << "ERROR: btree key order corrupt (recordloc's wrong). Keys:" << endl;
- cout << " k(" << i << "):" << keyNode(i).key.toString() << " RL:" << k(i).recordLoc.toString() << endl;
- cout << " k(" << i+1 << "):" << keyNode(i+1).key.toString() << " RL:" << k(i+1).recordLoc.toString() << endl;
- wassert( k(i).recordLoc < k(i+1).recordLoc );
- }
- }
- }
- }
- else {
- //faster:
- if( n > 1 ) {
- BSONObj k1 = keyNode(0).key;
- BSONObj k2 = keyNode(n-1).key;
- int z = k1.woCompare(k2);
- //wassert( z <= 0 );
- if( z > 0 ) {
+void BucketBasics::assertValid(bool force) {
+ if ( !debug && !force )
+ return;
+ wassert( n >= 0 && n < Size() );
+ wassert( emptySize >= 0 && emptySize < BucketSize );
+ wassert( topSize >= n && topSize <= BucketSize );
+ DEV {
+ // slow:
+ for ( int i = 0; i < n-1; i++ ) {
+ BSONObj k1 = keyNode(i).key;
+ BSONObj k2 = keyNode(i+1).key;
+ int z = k1.woCompare(k2); //OK
+ if ( z > 0 ) {
+ cout << "ERROR: btree key order corrupt. Keys:" << endl;
+ if ( ++nDumped < 5 ) {
+ for ( int j = 0; j < n; j++ ) {
+ cout << " " << keyNode(j).key.toString() << endl;
+ }
+ ((BtreeBucket *) this)->dump();
+ }
+ wassert(false);
+ break;
+ }
+ else if ( z == 0 ) {
+ if ( !(k(i).recordLoc < k(i+1).recordLoc) ) {
+ cout << "ERROR: btree key order corrupt (recordloc's wrong). Keys:" << endl;
+ cout << " k(" << i << "):" << keyNode(i).key.toString() << " RL:" << k(i).recordLoc.toString() << endl;
+ cout << " k(" << i+1 << "):" << keyNode(i+1).key.toString() << " RL:" << k(i+1).recordLoc.toString() << endl;
+ wassert( k(i).recordLoc < k(i+1).recordLoc );
+ }
+ }
+ }
+ }
+ else {
+ //faster:
+ if ( n > 1 ) {
+ BSONObj k1 = keyNode(0).key;
+ BSONObj k2 = keyNode(n-1).key;
+ int z = k1.woCompare(k2);
+ //wassert( z <= 0 );
+ if ( z > 0 ) {
problem() << "btree keys out of order" << '\n';
- ONCE {
+ ONCE {
((BtreeBucket *) this)->dump();
}
assert(false);
}
- }
- }
+ }
+ }
}
-inline void BucketBasics::markUnused(int keypos) {
- assert( keypos >= 0 && keypos < n );
- k(keypos).setUnused();
+inline void BucketBasics::markUnused(int keypos) {
+ assert( keypos >= 0 && keypos < n );
+ k(keypos).setUnused();
}
inline int BucketBasics::totalDataSize() const {
- return Size() - (data-(char*)this);
+ return Size() - (data-(char*)this);
}
-void BucketBasics::init(){
- parent.Null(); nextChild.Null();
- _Size = BucketSize;
- flags = Packed;
- n = 0;
- emptySize = totalDataSize(); topSize = 0;
- reserved = 0;
+void BucketBasics::init() {
+ parent.Null();
+ nextChild.Null();
+ _Size = BucketSize;
+ flags = Packed;
+ n = 0;
+ emptySize = totalDataSize();
+ topSize = 0;
+ reserved = 0;
}
/* we allocate space from the end of the buffer for data.
the keynodes grow from the front.
*/
inline int BucketBasics::_alloc(int bytes) {
- topSize += bytes;
- emptySize -= bytes;
- int ofs = totalDataSize() - topSize;
- assert( ofs > 0 );
- return ofs;
+ topSize += bytes;
+ emptySize -= bytes;
+ int ofs = totalDataSize() - topSize;
+ assert( ofs > 0 );
+ return ofs;
}
-void BucketBasics::_delKeyAtPos(int keypos) {
- assert( keypos >= 0 && keypos <= n );
- assert( childForPos(keypos).isNull() );
- n--;
- assert( n > 0 || nextChild.isNull() );
- for( int j = keypos; j < n; j++ )
- k(j) = k(j+1);
- emptySize += sizeof(_KeyNode);
- setNotPacked();
+void BucketBasics::_delKeyAtPos(int keypos) {
+ assert( keypos >= 0 && keypos <= n );
+ assert( childForPos(keypos).isNull() );
+ n--;
+ assert( n > 0 || nextChild.isNull() );
+ for ( int j = keypos; j < n; j++ )
+ k(j) = k(j+1);
+ emptySize += sizeof(_KeyNode);
+ setNotPacked();
}
/* add a key. must be > all existing. be careful to set next ptr right. */
-void BucketBasics::pushBack(const DiskLoc& recordLoc, BSONObj& key, DiskLoc prevChild) {
- int bytesNeeded = key.objsize() + sizeof(_KeyNode);
- assert( bytesNeeded <= emptySize );
- assert( n == 0 || keyNode(n-1).key.woCompare(key) <= 0 );
- emptySize -= sizeof(_KeyNode);
- _KeyNode& kn = k(n++);
- kn.prevChildBucket = prevChild;
- kn.recordLoc = recordLoc;
- kn.setKeyDataOfs( (short) _alloc(key.objsize()) );
- char *p = dataAt(kn.keyDataOfs());
- memcpy(p, key.objdata(), key.objsize());
+void BucketBasics::pushBack(const DiskLoc& recordLoc, BSONObj& key, DiskLoc prevChild) {
+ int bytesNeeded = key.objsize() + sizeof(_KeyNode);
+ assert( bytesNeeded <= emptySize );
+ assert( n == 0 || keyNode(n-1).key.woCompare(key) <= 0 );
+ emptySize -= sizeof(_KeyNode);
+ _KeyNode& kn = k(n++);
+ kn.prevChildBucket = prevChild;
+ kn.recordLoc = recordLoc;
+ kn.setKeyDataOfs( (short) _alloc(key.objsize()) );
+ char *p = dataAt(kn.keyDataOfs());
+ memcpy(p, key.objdata(), key.objsize());
}
bool BucketBasics::basicInsert(int keypos, const DiskLoc& recordLoc, BSONObj& key) {
- assert( keypos >= 0 && keypos <= n );
- int bytesNeeded = key.objsize() + sizeof(_KeyNode);
- if( bytesNeeded > emptySize ) {
- pack();
- if( bytesNeeded > emptySize )
- return false;
- }
- for( int j = n; j > keypos; j-- ) // make room
- k(j) = k(j-1);
- n++;
- emptySize -= sizeof(_KeyNode);
- _KeyNode& kn = k(keypos);
- kn.prevChildBucket.Null();
- kn.recordLoc = recordLoc;
- kn.setKeyDataOfs((short) _alloc(key.objsize()) );
- char *p = dataAt(kn.keyDataOfs());
- memcpy(p, key.objdata(), key.objsize());
- return true;
-}
-
-/* when we delete things we just leave empty space until the node is
+ assert( keypos >= 0 && keypos <= n );
+ int bytesNeeded = key.objsize() + sizeof(_KeyNode);
+ if ( bytesNeeded > emptySize ) {
+ pack();
+ if ( bytesNeeded > emptySize )
+ return false;
+ }
+ for ( int j = n; j > keypos; j-- ) // make room
+ k(j) = k(j-1);
+ n++;
+ emptySize -= sizeof(_KeyNode);
+ _KeyNode& kn = k(keypos);
+ kn.prevChildBucket.Null();
+ kn.recordLoc = recordLoc;
+ kn.setKeyDataOfs((short) _alloc(key.objsize()) );
+ char *p = dataAt(kn.keyDataOfs());
+ memcpy(p, key.objdata(), key.objsize());
+ return true;
+}
+
+/* when we delete things we just leave empty space until the node is
full and then we repack it.
*/
-void BucketBasics::pack() {
- if( flags & Packed )
- return;
-
- int tdz = totalDataSize();
- char temp[BucketSize];
- int ofs = tdz;
- topSize = 0;
- for( int j = 0; j < n; j++ ) {
- short ofsold = k(j).keyDataOfs();
- int sz = keyNode(j).key.objsize();
- ofs -= sz;
- topSize += sz;
- memcpy(temp+ofs, dataAt(ofsold), sz);
- k(j).setKeyDataOfsSavingUse( ofs );
- }
- int dataUsed = tdz - ofs;
- memcpy(data + ofs, temp + ofs, dataUsed);
- emptySize = tdz - dataUsed - n * sizeof(_KeyNode);
- assert( emptySize >= 0 );
-
- setPacked();
- assertValid();
+void BucketBasics::pack() {
+ if ( flags & Packed )
+ return;
+
+ int tdz = totalDataSize();
+ char temp[BucketSize];
+ int ofs = tdz;
+ topSize = 0;
+ for ( int j = 0; j < n; j++ ) {
+ short ofsold = k(j).keyDataOfs();
+ int sz = keyNode(j).key.objsize();
+ ofs -= sz;
+ topSize += sz;
+ memcpy(temp+ofs, dataAt(ofsold), sz);
+ k(j).setKeyDataOfsSavingUse( ofs );
+ }
+ int dataUsed = tdz - ofs;
+ memcpy(data + ofs, temp + ofs, dataUsed);
+ emptySize = tdz - dataUsed - n * sizeof(_KeyNode);
+ assert( emptySize >= 0 );
+
+ setPacked();
+ assertValid();
}
inline void BucketBasics::truncateTo(int N) {
- n = N;
- setNotPacked();
- pack();
+ n = N;
+ setNotPacked();
+ pack();
}
/* - BtreeBucket --------------------------------------------------- */
/* return largest key in the subtree. */
void BtreeBucket::findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey) {
- DiskLoc loc = thisLoc;
- while( 1 ) {
- BtreeBucket *b = loc.btree();
- if( !b->nextChild.isNull() ) {
- loc = b->nextChild;
- continue;
- }
-
- assert(b->n>0);
- largestLoc = loc;
- largestKey = b->n-1;
-
- break;
- }
-}
+ DiskLoc loc = thisLoc;
+ while ( 1 ) {
+ BtreeBucket *b = loc.btree();
+ if ( !b->nextChild.isNull() ) {
+ loc = b->nextChild;
+ continue;
+ }
+
+ assert(b->n>0);
+ largestLoc = loc;
+ largestKey = b->n-1;
+
+ break;
+ }
+}
/* pos: for existing keys k0...kn-1.
returns # it goes BEFORE. so key[pos-1] < key < key[pos]
returns n if it goes after the last existing key.
note result might be Unused!
*/
-bool BtreeBucket::find(BSONObj& key, DiskLoc recordLoc, int& pos) {
- /* binary search for this key */
- int l=0; int h=n-1;
- while( l <= h ) {
- int m = (l+h)/2;
- KeyNode M = keyNode(m);
- int x = key.woCompare(M.key);
- if( x == 0 )
- x = recordLoc.compare(M.recordLoc);
- if( x < 0 ) // key < M.key
- h = m-1;
- else if( x > 0 )
- l = m+1;
- else {
- // found it. however, if dup keys are here, be careful we might have
- // found one in the middle. we want find() to return the leftmost instance.
-/*
- while( m >= 1 && keyNode(m-1).key.woEqual(key) )
- m--;
-*/
-
- pos = m;
-
-/*
- DiskLoc ch = k(m).prevChildBucket;
- if( !ch.isNull() ) {
- // if dup keys, might be dups to the left.
- DiskLoc largestLoc;
- int largestKey;
- ch.btree()->findLargestKey(ch, largestLoc, largestKey);
- if( !largestLoc.isNull() ) {
- if( largestLoc.btree()->keyAt(largestKey).woEqual(key) )
- return false;
- }
- }
-*/
-
- return true;
- }
+bool BtreeBucket::find(BSONObj& key, DiskLoc recordLoc, int& pos) {
+ /* binary search for this key */
+ int l=0;
+ int h=n-1;
+ while ( l <= h ) {
+ int m = (l+h)/2;
+ KeyNode M = keyNode(m);
+ int x = key.woCompare(M.key);
+ if ( x == 0 )
+ x = recordLoc.compare(M.recordLoc);
+ if ( x < 0 ) // key < M.key
+ h = m-1;
+ else if ( x > 0 )
+ l = m+1;
+ else {
+ // found it. however, if dup keys are here, be careful we might have
+ // found one in the middle. we want find() to return the leftmost instance.
+ /*
+ while( m >= 1 && keyNode(m-1).key.woEqual(key) )
+ m--;
+ */
+
+ pos = m;
+
+ /*
+ DiskLoc ch = k(m).prevChildBucket;
+ if( !ch.isNull() ) {
+ // if dup keys, might be dups to the left.
+ DiskLoc largestLoc;
+ int largestKey;
+ ch.btree()->findLargestKey(ch, largestLoc, largestKey);
+ if( !largestLoc.isNull() ) {
+ if( largestLoc.btree()->keyAt(largestKey).woEqual(key) )
+ return false;
+ }
+ }
+ */
+
+ return true;
+ }
//? x = key.woCompare(M.key);
- }
- // not found
- pos = l;
- if( pos != n ) {
- BSONObj keyatpos = keyNode(pos).key;
- wassert( key.woCompare(keyatpos) <= 0 );
- if( pos > 0 ) {
- wassert( keyNode(pos-1).key.woCompare(key) <= 0 );
- }
- }
-
- return false;
+ }
+ // not found
+ pos = l;
+ if ( pos != n ) {
+ BSONObj keyatpos = keyNode(pos).key;
+ wassert( key.woCompare(keyatpos) <= 0 );
+ if ( pos > 0 ) {
+ wassert( keyNode(pos-1).key.woCompare(key) <= 0 );
+ }
+ }
+
+ return false;
}
void aboutToDeleteBucket(const DiskLoc&);
-void BtreeBucket::delBucket(const DiskLoc& thisLoc, IndexDetails& id) {
- aboutToDeleteBucket(thisLoc);
-
- assert( !isHead() );
-
- BtreeBucket *p = parent.btree();
- if( p->nextChild == thisLoc ) {
- p->nextChild.Null();
- }
- else {
- for( int i = 0; i < p->n; i++ ) {
- if( p->k(i).prevChildBucket == thisLoc ) {
- p->k(i).prevChildBucket.Null();
- goto found;
- }
- }
- cout << "ERROR: can't find ref to deleted bucket.\n";
- cout << "To delete:\n";
- dump();
- cout << "Parent:\n";
- p->dump();
- assert(false);
- }
+void BtreeBucket::delBucket(const DiskLoc& thisLoc, IndexDetails& id) {
+ aboutToDeleteBucket(thisLoc);
+
+ assert( !isHead() );
+
+ BtreeBucket *p = parent.btree();
+ if ( p->nextChild == thisLoc ) {
+ p->nextChild.Null();
+ }
+ else {
+ for ( int i = 0; i < p->n; i++ ) {
+ if ( p->k(i).prevChildBucket == thisLoc ) {
+ p->k(i).prevChildBucket.Null();
+ goto found;
+ }
+ }
+ cout << "ERROR: can't find ref to deleted bucket.\n";
+ cout << "To delete:\n";
+ dump();
+ cout << "Parent:\n";
+ p->dump();
+ assert(false);
+ }
found:
#if 1
- /* as a temporary defensive measure, we zap the whole bucket, AND don't truly delete
+ /* as a temporary defensive measure, we zap the whole bucket, AND don't truly delete
it (meaning it is ineligible for reuse). temporary to see if it helps with some
- issues.
- */
- memset(this, 0, Size());
+ issues.
+ */
+ memset(this, 0, Size());
#else
- //defensive:
- n = -1;
- parent.Null();
- theDataFileMgr.deleteRecord(id.indexNamespace().c_str(), thisLoc.rec(), thisLoc);
+ //defensive:
+ n = -1;
+ parent.Null();
+ theDataFileMgr.deleteRecord(id.indexNamespace().c_str(), thisLoc.rec(), thisLoc);
#endif
}
/* note: may delete the entire bucket! this invalid upon return sometimes. */
-void BtreeBucket::delKeyAtPos(const DiskLoc& thisLoc, IndexDetails& id, int p) {
- dassert( thisLoc.btree() == this );
- assert(n>0);
- DiskLoc left = childForPos(p);
-
- if( n == 1 ) {
- if( left.isNull() && nextChild.isNull() ) {
- if( isHead() )
- _delKeyAtPos(p); // we don't delete the top bucket ever
- else
- delBucket(thisLoc, id);
- return;
- }
- markUnused(p);
- return;
- }
-
- if( left.isNull() )
- _delKeyAtPos(p);
- else
- markUnused(p);
+void BtreeBucket::delKeyAtPos(const DiskLoc& thisLoc, IndexDetails& id, int p) {
+ dassert( thisLoc.btree() == this );
+ assert(n>0);
+ DiskLoc left = childForPos(p);
+
+ if ( n == 1 ) {
+ if ( left.isNull() && nextChild.isNull() ) {
+ if ( isHead() )
+ _delKeyAtPos(p); // we don't delete the top bucket ever
+ else
+ delBucket(thisLoc, id);
+ return;
+ }
+ markUnused(p);
+ return;
+ }
+
+ if ( left.isNull() )
+ _delKeyAtPos(p);
+ else
+ markUnused(p);
}
int verbose = 0;
int qqq = 0;
bool BtreeBucket::unindex(const DiskLoc& thisLoc, IndexDetails& id, BSONObj& key, const DiskLoc& recordLoc ) {
- if( key.objsize() > KeyMax ) {
- OCCASIONALLY problem() << "unindex: key too large to index, skipping " << id.indexNamespace() << /* ' ' << key.toString() << */ '\n';
- return false;
- }
-
- int pos;
- bool found;
- DiskLoc loc = locate(thisLoc, key, pos, found, recordLoc, 1);
- if( found ) {
- loc.btree()->delKeyAtPos(loc, id, pos);
- return true;
- }
- return false;
+ if ( key.objsize() > KeyMax ) {
+ OCCASIONALLY problem() << "unindex: key too large to index, skipping " << id.indexNamespace() << /* ' ' << key.toString() << */ '\n';
+ return false;
+ }
+
+ int pos;
+ bool found;
+ DiskLoc loc = locate(thisLoc, key, pos, found, recordLoc, 1);
+ if ( found ) {
+ loc.btree()->delKeyAtPos(loc, id, pos);
+ return true;
+ }
+ return false;
}
-BtreeBucket* BtreeBucket::allocTemp() {
- BtreeBucket *b = (BtreeBucket*) malloc(BucketSize);
- b->init();
- return b;
+BtreeBucket* BtreeBucket::allocTemp() {
+ BtreeBucket *b = (BtreeBucket*) malloc(BucketSize);
+ b->init();
+ return b;
}
-inline void fix(const DiskLoc& thisLoc, const DiskLoc& child) {
- if( !child.isNull() ) {
- if( insert_debug )
- cout << " " << child.toString() << ".parent=" << thisLoc.toString() << endl;
- child.btree()->parent = thisLoc;
- }
+inline void fix(const DiskLoc& thisLoc, const DiskLoc& child) {
+ if ( !child.isNull() ) {
+ if ( insert_debug )
+ cout << " " << child.toString() << ".parent=" << thisLoc.toString() << endl;
+ child.btree()->parent = thisLoc;
+ }
}
/* this sucks. maybe get rid of parent ptrs. */
-void BtreeBucket::fixParentPtrs(const DiskLoc& thisLoc) {
- dassert( thisLoc.btree() == this );
- fix(thisLoc, nextChild);
- for( int i = 0; i < n; i++ )
- fix(thisLoc, k(i).prevChildBucket);
+void BtreeBucket::fixParentPtrs(const DiskLoc& thisLoc) {
+ dassert( thisLoc.btree() == this );
+ fix(thisLoc, nextChild);
+ for ( int i = 0; i < n; i++ )
+ fix(thisLoc, k(i).prevChildBucket);
}
/* keypos - where to insert the key in range 0..n. 0=make leftmost, n=make rightmost.
*/
-void BtreeBucket::insertHere(DiskLoc thisLoc, int keypos,
- DiskLoc recordLoc, BSONObj& key,
- DiskLoc lchild, DiskLoc rchild, IndexDetails& idx)
+void BtreeBucket::insertHere(DiskLoc thisLoc, int keypos,
+ DiskLoc recordLoc, BSONObj& key,
+ DiskLoc lchild, DiskLoc rchild, IndexDetails& idx)
{
- dassert( thisLoc.btree() == this );
- if( insert_debug )
- cout << " " << thisLoc.toString() << ".insertHere " << key.toString() << '/' << recordLoc.toString() << ' '
- << lchild.toString() << ' ' << rchild.toString() << " keypos:" << keypos << endl;
-
- DiskLoc oldLoc = thisLoc;
-
- if( basicInsert(keypos, recordLoc, key) ) {
- _KeyNode& kn = k(keypos);
- if( keypos+1 == n ) { // last key
- if( nextChild != lchild ) {
- cout << "ERROR nextChild != lchild" << endl;
- cout << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
- cout << " keyPos: " << keypos << " n:" << n << endl;
- cout << " nextChild: " << nextChild.toString() << " lchild: " << lchild.toString() << endl;
- cout << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
- cout << " key: " << key.toString() << endl;
- dump();
+ dassert( thisLoc.btree() == this );
+ if ( insert_debug )
+ cout << " " << thisLoc.toString() << ".insertHere " << key.toString() << '/' << recordLoc.toString() << ' '
+ << lchild.toString() << ' ' << rchild.toString() << " keypos:" << keypos << endl;
+
+ DiskLoc oldLoc = thisLoc;
+
+ if ( basicInsert(keypos, recordLoc, key) ) {
+ _KeyNode& kn = k(keypos);
+ if ( keypos+1 == n ) { // last key
+ if ( nextChild != lchild ) {
+ cout << "ERROR nextChild != lchild" << endl;
+ cout << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
+ cout << " keyPos: " << keypos << " n:" << n << endl;
+ cout << " nextChild: " << nextChild.toString() << " lchild: " << lchild.toString() << endl;
+ cout << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
+ cout << " key: " << key.toString() << endl;
+ dump();
#if defined(_WIN32)
- cout << "\n\nDUMPING FULL INDEX" << endl;
- bt_dmp=1;
- bt_fv=1;
- idx.head.btree()->fullValidate(idx.head);
+ cout << "\n\nDUMPING FULL INDEX" << endl;
+ bt_dmp=1;
+ bt_fv=1;
+ idx.head.btree()->fullValidate(idx.head);
#endif
- assert(false);
- }
- kn.prevChildBucket = nextChild;
- assert( kn.prevChildBucket == lchild );
- nextChild = rchild;
- if( !rchild.isNull() )
- rchild.btree()->parent = thisLoc;
- }
- else {
- k(keypos).prevChildBucket = lchild;
- if( k(keypos+1).prevChildBucket != lchild ) {
- cout << "ERROR k(keypos+1).prevChildBucket != lchild" << endl;
- cout << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
- cout << " keyPos: " << keypos << " n:" << n << endl;
- cout << " k(keypos+1).pcb: " << k(keypos+1).prevChildBucket.toString() << " lchild: " << lchild.toString() << endl;
- cout << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
- cout << " key: " << key.toString() << endl;
- dump();
+ assert(false);
+ }
+ kn.prevChildBucket = nextChild;
+ assert( kn.prevChildBucket == lchild );
+ nextChild = rchild;
+ if ( !rchild.isNull() )
+ rchild.btree()->parent = thisLoc;
+ }
+ else {
+ k(keypos).prevChildBucket = lchild;
+ if ( k(keypos+1).prevChildBucket != lchild ) {
+ cout << "ERROR k(keypos+1).prevChildBucket != lchild" << endl;
+ cout << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
+ cout << " keyPos: " << keypos << " n:" << n << endl;
+ cout << " k(keypos+1).pcb: " << k(keypos+1).prevChildBucket.toString() << " lchild: " << lchild.toString() << endl;
+ cout << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
+ cout << " key: " << key.toString() << endl;
+ dump();
#if defined(_WIN32)
- cout << "\n\nDUMPING FULL INDEX" << endl;
- bt_dmp=1;
- bt_fv=1;
- idx.head.btree()->fullValidate(idx.head);
+ cout << "\n\nDUMPING FULL INDEX" << endl;
+ bt_dmp=1;
+ bt_fv=1;
+ idx.head.btree()->fullValidate(idx.head);
#endif
- assert(false);
- }
- k(keypos+1).prevChildBucket = rchild;
- if( !rchild.isNull() )
- rchild.btree()->parent = thisLoc;
- }
- return;
- }
-
- // split
- if( split_debug )
- cout << " " << thisLoc.toString() << ".split" << endl;
-
- int mid = n / 2;
-
- /* on duplicate key, we need to ensure that they all end up on the RHS */
- if( 0 ) {
- assert(mid>0);
- while( 1 ) {
- KeyNode mn = keyNode(mid);
- KeyNode left = keyNode(mid-1);
- if( left.key < mn.key )
- break;
- mid--;
- if( mid < 3 ) {
- problem() << "Assertion failure - mid<3: duplicate key bug not fixed yet" << endl;
- cout << "Assertion failure - mid<3: duplicate key bug not fixed yet" << endl;
- cout << " ns:" << idx.indexNamespace() << endl;
- cout << " key:" << mn.key.toString() << endl;
- break;
- }
- }
- }
-
- BtreeBucket *r = allocTemp();
- DiskLoc rLoc;
-
- if( split_debug )
- cout << " mid:" << mid << ' ' << keyNode(mid).key.toString() << " n:" << n << endl;
- for( int i = mid+1; i < n; i++ ) {
- KeyNode kn = keyNode(i);
- r->pushBack(kn.recordLoc, kn.key, kn.prevChildBucket);
- }
- r->nextChild = nextChild;
- r->assertValid();
+ assert(false);
+ }
+ k(keypos+1).prevChildBucket = rchild;
+ if ( !rchild.isNull() )
+ rchild.btree()->parent = thisLoc;
+ }
+ return;
+ }
+
+ // split
+ if ( split_debug )
+ cout << " " << thisLoc.toString() << ".split" << endl;
+
+ int mid = n / 2;
+
+ /* on duplicate key, we need to ensure that they all end up on the RHS */
+ if ( 0 ) {
+ assert(mid>0);
+ while ( 1 ) {
+ KeyNode mn = keyNode(mid);
+ KeyNode left = keyNode(mid-1);
+ if ( left.key < mn.key )
+ break;
+ mid--;
+ if ( mid < 3 ) {
+ problem() << "Assertion failure - mid<3: duplicate key bug not fixed yet" << endl;
+ cout << "Assertion failure - mid<3: duplicate key bug not fixed yet" << endl;
+ cout << " ns:" << idx.indexNamespace() << endl;
+ cout << " key:" << mn.key.toString() << endl;
+ break;
+ }
+ }
+ }
+
+ BtreeBucket *r = allocTemp();
+ DiskLoc rLoc;
+
+ if ( split_debug )
+ cout << " mid:" << mid << ' ' << keyNode(mid).key.toString() << " n:" << n << endl;
+ for ( int i = mid+1; i < n; i++ ) {
+ KeyNode kn = keyNode(i);
+ r->pushBack(kn.recordLoc, kn.key, kn.prevChildBucket);
+ }
+ r->nextChild = nextChild;
+ r->assertValid();
//r->dump();
- rLoc = theDataFileMgr.insert(idx.indexNamespace().c_str(), r, r->Size(), true);
- if( split_debug )
- cout << " new rLoc:" << rLoc.toString() << endl;
- free(r); r = 0;
- rLoc.btree()->fixParentPtrs(rLoc);
-
- {
- KeyNode middle = keyNode(mid);
- nextChild = middle.prevChildBucket; // middle key gets promoted, its children will be thisLoc (l) and rLoc (r)
- if( split_debug ) {
- //rLoc.btree()->dump();
- cout << " middle key:" << middle.key.toString() << endl;
- }
-
- // promote middle to a parent node
- if( parent.isNull() ) {
- // make a new parent if we were the root
- BtreeBucket *p = allocTemp();
- p->pushBack(middle.recordLoc, middle.key, thisLoc);
- p->nextChild = rLoc;
- p->assertValid();
- parent = idx.head = theDataFileMgr.insert(idx.indexNamespace().c_str(), p, p->Size(), true);
- if( split_debug )
- cout << " we were root, making new root:" << hex << parent.getOfs() << dec << endl;
- free(p);
- rLoc.btree()->parent = parent;
- }
- else {
- /* set this before calling _insert - if it splits it will do fixParent() logic and fix the value,
- so we don't want to overwrite that if it happens.
- */
- rLoc.btree()->parent = parent;
- if( split_debug )
- cout << " promoting middle key " << middle.key.toString() << endl;
- parent.btree()->_insert(parent, middle.recordLoc, middle.key, false, thisLoc, rLoc, idx);
- }
+ rLoc = theDataFileMgr.insert(idx.indexNamespace().c_str(), r, r->Size(), true);
+ if ( split_debug )
+ cout << " new rLoc:" << rLoc.toString() << endl;
+ free(r);
+ r = 0;
+ rLoc.btree()->fixParentPtrs(rLoc);
+
+ {
+ KeyNode middle = keyNode(mid);
+ nextChild = middle.prevChildBucket; // middle key gets promoted, its children will be thisLoc (l) and rLoc (r)
+ if ( split_debug ) {
+ //rLoc.btree()->dump();
+ cout << " middle key:" << middle.key.toString() << endl;
+ }
+
+ // promote middle to a parent node
+ if ( parent.isNull() ) {
+ // make a new parent if we were the root
+ BtreeBucket *p = allocTemp();
+ p->pushBack(middle.recordLoc, middle.key, thisLoc);
+ p->nextChild = rLoc;
+ p->assertValid();
+ parent = idx.head = theDataFileMgr.insert(idx.indexNamespace().c_str(), p, p->Size(), true);
+ if ( split_debug )
+ cout << " we were root, making new root:" << hex << parent.getOfs() << dec << endl;
+ free(p);
+ rLoc.btree()->parent = parent;
+ }
+ else {
+ /* set this before calling _insert - if it splits it will do fixParent() logic and fix the value,
+ so we don't want to overwrite that if it happens.
+ */
+ rLoc.btree()->parent = parent;
+ if ( split_debug )
+ cout << " promoting middle key " << middle.key.toString() << endl;
+ parent.btree()->_insert(parent, middle.recordLoc, middle.key, false, thisLoc, rLoc, idx);
+ }
//BtreeBucket *br = rLoc.btree();
//br->dump();
//parent.btree()->dump();
//idx.head.btree()->dump();
- }
+ }
- truncateTo(mid); // note this may trash middle.key! thus we had to promote it before finishing up here.
+ truncateTo(mid); // note this may trash middle.key! thus we had to promote it before finishing up here.
+
+ // add our new key, there is room now
+ {
- // add our new key, there is room now
- {
-
//dump();
- if( keypos <= mid ) {
+ if ( keypos <= mid ) {
// if( keypos < mid ) {
- if( split_debug )
- cout << " keypos<mid, insertHere() the new key" << endl;
- insertHere(thisLoc, keypos, recordLoc, key, lchild, rchild, idx);
+ if ( split_debug )
+ cout << " keypos<mid, insertHere() the new key" << endl;
+ insertHere(thisLoc, keypos, recordLoc, key, lchild, rchild, idx);
//dump();
- } else {
- int kp = keypos-mid-1; assert(kp>=0);
- rLoc.btree()->insertHere(rLoc, kp, recordLoc, key, lchild, rchild, idx);
+ } else {
+ int kp = keypos-mid-1;
+ assert(kp>=0);
+ rLoc.btree()->insertHere(rLoc, kp, recordLoc, key, lchild, rchild, idx);
// set a bp here.
// if( !lchild.isNull() ) cout << lchild.btree()->parent.toString() << endl;
// if( !rchild.isNull() ) cout << rchild.btree()->parent.toString() << endl;
- }
- }
+ }
+ }
- if( split_debug )
- cout << " split end " << hex << thisLoc.getOfs() << dec << endl;
+ if ( split_debug )
+ cout << " split end " << hex << thisLoc.getOfs() << dec << endl;
}
/* start a new index off, empty */
DiskLoc BtreeBucket::addHead(IndexDetails& id) {
- BtreeBucket *p = allocTemp();
- DiskLoc loc = theDataFileMgr.insert(id.indexNamespace().c_str(), p, p->Size(), true);
- free(p);
- return loc;
+ BtreeBucket *p = allocTemp();
+ DiskLoc loc = theDataFileMgr.insert(id.indexNamespace().c_str(), p, p->Size(), true);
+ free(p);
+ return loc;
}
DiskLoc BtreeBucket::getHead(const DiskLoc& thisLoc) {
- DiskLoc p = thisLoc;
- while( !p.btree()->isHead() )
- p = p.btree()->parent;
- return p;
+ DiskLoc p = thisLoc;
+ while ( !p.btree()->isHead() )
+ p = p.btree()->parent;
+ return p;
}
DiskLoc BtreeBucket::advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) {
- if( keyOfs < 0 || keyOfs >= n ) {
- cout << "ASSERT failure BtreeBucket::advance, caller: " << caller << endl;
- cout << " thisLoc: " << thisLoc.toString() << endl;
- cout << " keyOfs: " << keyOfs << " n:" << n << " direction: " << direction << endl;
- cout << bucketSummary() << endl;
- assert(false);
- }
- int adj = direction < 0 ? 1 : 0;
- int ko = keyOfs + direction;
- DiskLoc nextDown = childForPos(ko+adj);
- if( !nextDown.isNull() ) {
- while( 1 ) {
- keyOfs = direction>0 ? 0 : nextDown.btree()->n - 1;
- DiskLoc loc= nextDown.btree()->childForPos(keyOfs + adj);
- if( loc.isNull() )
- break;
- nextDown = loc;
- }
- return nextDown;
- }
-
- if( ko < n && ko >= 0 ) {
- keyOfs = ko;
- return thisLoc;
- }
-
- // end of bucket. traverse back up.
- DiskLoc childLoc = thisLoc;
- DiskLoc ancestor = parent;
- while( 1 ) {
- if( ancestor.isNull() )
- break;
- BtreeBucket *an = ancestor.btree();
- for( int i = 0; i < an->n; i++ ) {
- if( an->childForPos(i+adj) == childLoc ) {
- keyOfs = i;
- return ancestor;
- }
- }
- assert( direction<0 || an->nextChild == childLoc );
- // parent exhausted also, keep going up
- childLoc = ancestor;
- ancestor = an->parent;
- }
-
- return DiskLoc();
-}
-
-DiskLoc BtreeBucket::locate(const DiskLoc& thisLoc, BSONObj& key, int& pos, bool& found, DiskLoc recordLoc, int direction) {
- int p;
- found = find(key, recordLoc, p);
- if( found ) {
- pos = p;
- return thisLoc;
- }
-
- DiskLoc child = childForPos(p);
-
- if( !child.isNull() ) {
- DiskLoc l = child.btree()->locate(child, key, pos, found, recordLoc, direction);
- if( !l.isNull() )
- return l;
- }
-
- pos = p;
- if ( direction < 0 )
- return --pos == -1 ? DiskLoc() /*theend*/ : thisLoc;
- else
- return pos == n ? DiskLoc() /*theend*/ : thisLoc;
+ if ( keyOfs < 0 || keyOfs >= n ) {
+ cout << "ASSERT failure BtreeBucket::advance, caller: " << caller << endl;
+ cout << " thisLoc: " << thisLoc.toString() << endl;
+ cout << " keyOfs: " << keyOfs << " n:" << n << " direction: " << direction << endl;
+ cout << bucketSummary() << endl;
+ assert(false);
+ }
+ int adj = direction < 0 ? 1 : 0;
+ int ko = keyOfs + direction;
+ DiskLoc nextDown = childForPos(ko+adj);
+ if ( !nextDown.isNull() ) {
+ while ( 1 ) {
+ keyOfs = direction>0 ? 0 : nextDown.btree()->n - 1;
+ DiskLoc loc= nextDown.btree()->childForPos(keyOfs + adj);
+ if ( loc.isNull() )
+ break;
+ nextDown = loc;
+ }
+ return nextDown;
+ }
+
+ if ( ko < n && ko >= 0 ) {
+ keyOfs = ko;
+ return thisLoc;
+ }
+
+ // end of bucket. traverse back up.
+ DiskLoc childLoc = thisLoc;
+ DiskLoc ancestor = parent;
+ while ( 1 ) {
+ if ( ancestor.isNull() )
+ break;
+ BtreeBucket *an = ancestor.btree();
+ for ( int i = 0; i < an->n; i++ ) {
+ if ( an->childForPos(i+adj) == childLoc ) {
+ keyOfs = i;
+ return ancestor;
+ }
+ }
+ assert( direction<0 || an->nextChild == childLoc );
+ // parent exhausted also, keep going up
+ childLoc = ancestor;
+ ancestor = an->parent;
+ }
+
+ return DiskLoc();
+}
+
+DiskLoc BtreeBucket::locate(const DiskLoc& thisLoc, BSONObj& key, int& pos, bool& found, DiskLoc recordLoc, int direction) {
+ int p;
+ found = find(key, recordLoc, p);
+ if ( found ) {
+ pos = p;
+ return thisLoc;
+ }
+
+ DiskLoc child = childForPos(p);
+
+ if ( !child.isNull() ) {
+ DiskLoc l = child.btree()->locate(child, key, pos, found, recordLoc, direction);
+ if ( !l.isNull() )
+ return l;
+ }
+
+ pos = p;
+ if ( direction < 0 )
+ return --pos == -1 ? DiskLoc() /*theend*/ : thisLoc;
+ else
+ return pos == n ? DiskLoc() /*theend*/ : thisLoc;
}
/* thisloc is the location of this bucket object. you must pass that in. */
-int BtreeBucket::_insert(DiskLoc thisLoc, DiskLoc recordLoc,
- BSONObj& key, bool dupsAllowed,
- DiskLoc lChild, DiskLoc rChild, IndexDetails& idx) {
- if( key.objsize() > KeyMax ) {
- problem() << "ERROR: key too large len:" << key.objsize() << " max:" << KeyMax << ' ' << idx.indexNamespace() << endl;
- return 2;
- }
- assert( key.objsize() > 0 );
-
- int pos;
- bool found = find(key, recordLoc, pos);
- if( insert_debug ) {
- cout << " " << thisLoc.toString() << '.' << "_insert " <<
- key.toString() << '/' << recordLoc.toString() <<
- " l:" << lChild.toString() << " r:" << rChild.toString() << endl;
- cout << " found:" << found << " pos:" << pos << " n:" << n << endl;
- }
-
- if( found ) {
- if( k(pos).isUnused() ) {
- cout << "an unused already occupying keyslot, write more code.\n";
- cout << " index may be corrupt (missing data) now.\n";
- }
-
- cout << "_insert(): key already exists in index\n";
- cout << " " << idx.indexNamespace().c_str() << " thisLoc:" << thisLoc.toString() << '\n';
- cout << " " << key.toString() << '\n';
- cout << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
- cout << " old l r: " << childForPos(pos).toString() << ' ' << childForPos(pos+1).toString() << endl;
- cout << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
- assert(false);
-
- // on a dup key always insert on the right or else you will be broken.
+int BtreeBucket::_insert(DiskLoc thisLoc, DiskLoc recordLoc,
+ BSONObj& key, bool dupsAllowed,
+ DiskLoc lChild, DiskLoc rChild, IndexDetails& idx) {
+ if ( key.objsize() > KeyMax ) {
+ problem() << "ERROR: key too large len:" << key.objsize() << " max:" << KeyMax << ' ' << idx.indexNamespace() << endl;
+ return 2;
+ }
+ assert( key.objsize() > 0 );
+
+ int pos;
+ bool found = find(key, recordLoc, pos);
+ if ( insert_debug ) {
+ cout << " " << thisLoc.toString() << '.' << "_insert " <<
+ key.toString() << '/' << recordLoc.toString() <<
+ " l:" << lChild.toString() << " r:" << rChild.toString() << endl;
+ cout << " found:" << found << " pos:" << pos << " n:" << n << endl;
+ }
+
+ if ( found ) {
+ if ( k(pos).isUnused() ) {
+ cout << "an unused already occupying keyslot, write more code.\n";
+ cout << " index may be corrupt (missing data) now.\n";
+ }
+
+ cout << "_insert(): key already exists in index\n";
+ cout << " " << idx.indexNamespace().c_str() << " thisLoc:" << thisLoc.toString() << '\n';
+ cout << " " << key.toString() << '\n';
+ cout << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
+ cout << " old l r: " << childForPos(pos).toString() << ' ' << childForPos(pos+1).toString() << endl;
+ cout << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
+ assert(false);
+
+ // on a dup key always insert on the right or else you will be broken.
// pos++;
- // on a promotion, find the right point to update if dup keys.
- /* not needed: we always insert right after the first key so we are ok with just pos++...
- if( !rChild.isNull() ) {
- while( pos < n && k(pos).prevChildBucket != lchild ) {
- pos++;
- cout << "looking for the right dup key" << endl;
- }
- }
- */
- }
-
- DEBUGGING cout << "TEMP: key: " << key.toString() << endl;
- DiskLoc& child = getChild(pos);
- if( insert_debug )
- cout << " getChild(" << pos << "): " << child.toString() << endl;
- if( child.isNull() || !rChild.isNull() /* means an 'internal' insert */ ) {
- insertHere(thisLoc, pos, recordLoc, key, lChild, rChild, idx);
- return 0;
- }
-
- return child.btree()->insert(child, recordLoc, key, dupsAllowed, idx, false);
-}
-
-void BtreeBucket::dump() {
- cout << "DUMP btreebucket: ";
- cout << " parent:" << hex << parent.getOfs() << dec;
- for( int i = 0; i < n; i++ ) {
- cout << '\n';
- KeyNode k = keyNode(i);
- cout << '\t' << i << '\t' << k.key.toString() << "\tleft:" << hex <<
- k.prevChildBucket.getOfs() << "\trec:" << k.recordLoc.getOfs() << dec;
- if( this->k(i).isUnused() )
- cout << " UNUSED";
- }
- cout << " right:" << hex << nextChild.getOfs() << dec << endl;
+ // on a promotion, find the right point to update if dup keys.
+ /* not needed: we always insert right after the first key so we are ok with just pos++...
+ if( !rChild.isNull() ) {
+ while( pos < n && k(pos).prevChildBucket != lchild ) {
+ pos++;
+ cout << "looking for the right dup key" << endl;
+ }
+ }
+ */
+ }
+
+ DEBUGGING cout << "TEMP: key: " << key.toString() << endl;
+ DiskLoc& child = getChild(pos);
+ if ( insert_debug )
+ cout << " getChild(" << pos << "): " << child.toString() << endl;
+ if ( child.isNull() || !rChild.isNull() /* means an 'internal' insert */ ) {
+ insertHere(thisLoc, pos, recordLoc, key, lChild, rChild, idx);
+ return 0;
+ }
+
+ return child.btree()->insert(child, recordLoc, key, dupsAllowed, idx, false);
+}
+
+void BtreeBucket::dump() {
+ cout << "DUMP btreebucket: ";
+ cout << " parent:" << hex << parent.getOfs() << dec;
+ for ( int i = 0; i < n; i++ ) {
+ cout << '\n';
+ KeyNode k = keyNode(i);
+ cout << '\t' << i << '\t' << k.key.toString() << "\tleft:" << hex <<
+ k.prevChildBucket.getOfs() << "\trec:" << k.recordLoc.getOfs() << dec;
+ if ( this->k(i).isUnused() )
+ cout << " UNUSED";
+ }
+ cout << " right:" << hex << nextChild.getOfs() << dec << endl;
}
/* todo: meaning of return code unclear clean up */
-int BtreeBucket::insert(DiskLoc thisLoc, DiskLoc recordLoc,
- BSONObj& key, bool dupsAllowed, IndexDetails& idx, bool toplevel)
+int BtreeBucket::insert(DiskLoc thisLoc, DiskLoc recordLoc,
+ BSONObj& key, bool dupsAllowed, IndexDetails& idx, bool toplevel)
{
- if( toplevel ) {
- if( key.objsize() > KeyMax ) {
- problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace().c_str() << ' ' << key.toString() << '\n';
- return 3;
- }
- ++ninserts;
- /*
- if( ninserts % 1000 == 0 ) {
- cout << "ninserts: " << ninserts << endl;
- if( 0 && ninserts >= 127287 ) {
- cout << "debug?" << endl;
- split_debug = 1;
- }
- }
- */
- }
-
- int x = _insert(thisLoc, recordLoc, key, dupsAllowed, DiskLoc(), DiskLoc(), idx);
- assertValid();
-
- return x;
+ if ( toplevel ) {
+ if ( key.objsize() > KeyMax ) {
+ problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace().c_str() << ' ' << key.toString() << '\n';
+ return 3;
+ }
+ ++ninserts;
+ /*
+ if( ninserts % 1000 == 0 ) {
+ cout << "ninserts: " << ninserts << endl;
+ if( 0 && ninserts >= 127287 ) {
+ cout << "debug?" << endl;
+ split_debug = 1;
+ }
+ }
+ */
+ }
+
+ int x = _insert(thisLoc, recordLoc, key, dupsAllowed, DiskLoc(), DiskLoc(), idx);
+ assertValid();
+
+ return x;
}
void BtreeBucket::shape(stringstream& ss) {
- _shape(0, ss);
+ _shape(0, ss);
}
diff --git a/db/btree.h b/db/btree.h
index 2439784df15..d5f3f334389 100644
--- a/db/btree.h
+++ b/db/btree.h
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -26,22 +26,34 @@
#pragma pack(push,1)
struct _KeyNode {
- DiskLoc prevChildBucket;
- DiskLoc recordLoc;
- short keyDataOfs() { return (short) _kdo; }
- unsigned short _kdo;
- void setKeyDataOfs(short s) { _kdo = s; assert(s>=0); }
- void setKeyDataOfsSavingUse(short s) { _kdo = s; assert(s>=0); }
- void setUnused() {
- /* Setting ofs to odd is the sentinel for unused, as real recordLoc's are always
- even numbers.
- Note we need to keep its value basically the same as we use the recordLoc
- as part of the key in the index (to handle duplicate keys efficiently).
- */
- recordLoc.GETOFS() |= 1;
- }
- int isUnused() { return recordLoc.getOfs() & 1; }
- int isUsed() { return !isUnused(); }
+ DiskLoc prevChildBucket;
+ DiskLoc recordLoc;
+ short keyDataOfs() {
+ return (short) _kdo;
+ }
+ unsigned short _kdo;
+ void setKeyDataOfs(short s) {
+ _kdo = s;
+ assert(s>=0);
+ }
+ void setKeyDataOfsSavingUse(short s) {
+ _kdo = s;
+ assert(s>=0);
+ }
+ void setUnused() {
+ /* Setting ofs to odd is the sentinel for unused, as real recordLoc's are always
+ even numbers.
+ Note we need to keep its value basically the same as we use the recordLoc
+ as part of the key in the index (to handle duplicate keys efficiently).
+ */
+ recordLoc.GETOFS() |= 1;
+ }
+ int isUnused() {
+ return recordLoc.getOfs() & 1;
+ }
+ int isUsed() {
+ return !isUnused();
+ }
};
#pragma pack(pop)
@@ -51,182 +63,204 @@ class BucketBasics;
/* wrapper - this is our in memory representation of the key. _KeyNode is the disk representation. */
class KeyNode {
public:
- KeyNode(BucketBasics& bb, _KeyNode &k);
- DiskLoc& prevChildBucket;
- DiskLoc& recordLoc;
- BSONObj key;
+ KeyNode(BucketBasics& bb, _KeyNode &k);
+ DiskLoc& prevChildBucket;
+ DiskLoc& recordLoc;
+ BSONObj key;
};
#pragma pack(push,1)
/* this class is all about the storage management */
class BucketBasics {
- friend class KeyNode;
+ friend class KeyNode;
public:
- void dumpTree(DiskLoc thisLoc);
- bool isHead() { return parent.isNull(); }
- void assertValid(bool force = false);
- int fullValidate(const DiskLoc& thisLoc); /* traverses everything */
+ void dumpTree(DiskLoc thisLoc);
+ bool isHead() {
+ return parent.isNull();
+ }
+ void assertValid(bool force = false);
+ int fullValidate(const DiskLoc& thisLoc); /* traverses everything */
protected:
- DiskLoc& getChild(int pos) {
- assert( pos >= 0 && pos <= n );
- return pos == n ? nextChild : k(pos).prevChildBucket;
- }
- KeyNode keyNode(int i) {
- assert( i < n );
- return KeyNode(*this, k(i));
- }
-
- char * dataAt(short ofs) { return data + ofs; }
-
- void init(); // initialize a new node
-
- /* returns false if node is full and must be split
- keypos is where to insert -- inserted after that key #. so keypos=0 is the leftmost one.
- */
- bool basicInsert(int keypos, const DiskLoc& recordLoc, BSONObj& key);
- void pushBack(const DiskLoc& recordLoc, BSONObj& key, DiskLoc prevChild);
- void _delKeyAtPos(int keypos); // low level version that doesn't deal with child ptrs.
-
- /* !Packed means there is deleted fragment space within the bucket.
+ DiskLoc& getChild(int pos) {
+ assert( pos >= 0 && pos <= n );
+ return pos == n ? nextChild : k(pos).prevChildBucket;
+ }
+ KeyNode keyNode(int i) {
+ assert( i < n );
+ return KeyNode(*this, k(i));
+ }
+
+ char * dataAt(short ofs) {
+ return data + ofs;
+ }
+
+ void init(); // initialize a new node
+
+ /* returns false if node is full and must be split
+ keypos is where to insert -- inserted after that key #. so keypos=0 is the leftmost one.
+ */
+ bool basicInsert(int keypos, const DiskLoc& recordLoc, BSONObj& key);
+ void pushBack(const DiskLoc& recordLoc, BSONObj& key, DiskLoc prevChild);
+ void _delKeyAtPos(int keypos); // low level version that doesn't deal with child ptrs.
+
+ /* !Packed means there is deleted fragment space within the bucket.
We "repack" when we run out of space before considering the node
- to be full.
- */
- enum Flags { Packed=1 };
-
- DiskLoc childForPos(int p) {
- return p == n ? nextChild : k(p).prevChildBucket;
- }
-
- int totalDataSize() const;
- void pack(); void setNotPacked(); void setPacked();
- int _alloc(int bytes);
- void truncateTo(int N);
- void markUnused(int keypos);
+ to be full.
+ */
+ enum Flags { Packed=1 };
+
+ DiskLoc childForPos(int p) {
+ return p == n ? nextChild : k(p).prevChildBucket;
+ }
+
+ int totalDataSize() const;
+ void pack();
+ void setNotPacked();
+ void setPacked();
+ int _alloc(int bytes);
+ void truncateTo(int N);
+ void markUnused(int keypos);
public:
- DiskLoc parent;
-
- string bucketSummary() const {
- stringstream ss;
- ss << " Bucket info:" << endl;
- ss << " n: " << n << endl;
- ss << " parent: " << parent.toString() << endl;
- ss << " nextChild: " << parent.toString() << endl;
- ss << " Size: " << _Size << " flags:" << flags << endl;
- ss << " emptySize: " << emptySize << " topSize: " << topSize << endl;
- return ss.str();
- }
+ DiskLoc parent;
+
+ string bucketSummary() const {
+ stringstream ss;
+ ss << " Bucket info:" << endl;
+ ss << " n: " << n << endl;
+ ss << " parent: " << parent.toString() << endl;
+ ss << " nextChild: " << parent.toString() << endl;
+ ss << " Size: " << _Size << " flags:" << flags << endl;
+ ss << " emptySize: " << emptySize << " topSize: " << topSize << endl;
+ return ss.str();
+ }
protected:
- void _shape(int level, stringstream&);
- DiskLoc nextChild; // child bucket off and to the right of the highest key.
- int _Size; // total size of this btree node in bytes. constant.
- int Size() const;
- int flags;
- int emptySize; // size of the empty region
- int topSize; // size of the data at the top of the bucket (keys are at the beginning or 'bottom')
- int n; // # of keys so far.
- int reserved;
- _KeyNode& k(int i) { return ((_KeyNode*)data)[i]; }
- char data[4];
+ void _shape(int level, stringstream&);
+ DiskLoc nextChild; // child bucket off and to the right of the highest key.
+ int _Size; // total size of this btree node in bytes. constant.
+ int Size() const;
+ int flags;
+ int emptySize; // size of the empty region
+ int topSize; // size of the data at the top of the bucket (keys are at the beginning or 'bottom')
+ int n; // # of keys so far.
+ int reserved;
+ _KeyNode& k(int i) {
+ return ((_KeyNode*)data)[i];
+ }
+ char data[4];
};
-class BtreeBucket : public BucketBasics {
- friend class BtreeCursor;
+class BtreeBucket : public BucketBasics {
+ friend class BtreeCursor;
public:
- void dump();
+ void dump();
- static DiskLoc addHead(IndexDetails&); /* start a new index off, empty */
- int insert(DiskLoc thisLoc, DiskLoc recordLoc,
- BSONObj& key, bool dupsAllowed, IndexDetails& idx, bool toplevel);
+ static DiskLoc addHead(IndexDetails&); /* start a new index off, empty */
+ int insert(DiskLoc thisLoc, DiskLoc recordLoc,
+ BSONObj& key, bool dupsAllowed, IndexDetails& idx, bool toplevel);
- bool unindex(const DiskLoc& thisLoc, IndexDetails& id, BSONObj& key, const DiskLoc& recordLoc);
+ bool unindex(const DiskLoc& thisLoc, IndexDetails& id, BSONObj& key, const DiskLoc& recordLoc);
- /* locate may return an "unused" key that is just a marker. so be careful.
- looks for a key:recordloc pair.
- */
- DiskLoc locate(const DiskLoc& thisLoc, BSONObj& key, int& pos, bool& found, DiskLoc recordLoc, int direction=1);
+ /* locate may return an "unused" key that is just a marker. so be careful.
+ looks for a key:recordloc pair.
+ */
+ DiskLoc locate(const DiskLoc& thisLoc, BSONObj& key, int& pos, bool& found, DiskLoc recordLoc, int direction=1);
- /* advance one key position in the index: */
- DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller);
- DiskLoc getHead(const DiskLoc& thisLoc);
+ /* advance one key position in the index: */
+ DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller);
+ DiskLoc getHead(const DiskLoc& thisLoc);
- /* get tree shape */
- void shape(stringstream&);
+ /* get tree shape */
+ void shape(stringstream&);
private:
- void fixParentPtrs(const DiskLoc& thisLoc);
- void delBucket(const DiskLoc& thisLoc, IndexDetails&);
- void delKeyAtPos(const DiskLoc& thisLoc, IndexDetails& id, int p);
- BSONObj keyAt(int keyOfs) { return keyOfs >= n ? BSONObj() : keyNode(keyOfs).key; }
- static BtreeBucket* allocTemp(); /* caller must release with free() */
- void insertHere(DiskLoc thisLoc, int keypos,
- DiskLoc recordLoc, BSONObj& key,
- DiskLoc lchild, DiskLoc rchild, IndexDetails&);
- int _insert(DiskLoc thisLoc, DiskLoc recordLoc,
- BSONObj& key, bool dupsAllowed,
- DiskLoc lChild, DiskLoc rChild, IndexDetails&);
- bool find(BSONObj& key, DiskLoc recordLoc, int& pos);
- static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey);
+ void fixParentPtrs(const DiskLoc& thisLoc);
+ void delBucket(const DiskLoc& thisLoc, IndexDetails&);
+ void delKeyAtPos(const DiskLoc& thisLoc, IndexDetails& id, int p);
+ BSONObj keyAt(int keyOfs) {
+ return keyOfs >= n ? BSONObj() : keyNode(keyOfs).key;
+ }
+ static BtreeBucket* allocTemp(); /* caller must release with free() */
+ void insertHere(DiskLoc thisLoc, int keypos,
+ DiskLoc recordLoc, BSONObj& key,
+ DiskLoc lchild, DiskLoc rchild, IndexDetails&);
+ int _insert(DiskLoc thisLoc, DiskLoc recordLoc,
+ BSONObj& key, bool dupsAllowed,
+ DiskLoc lChild, DiskLoc rChild, IndexDetails&);
+ bool find(BSONObj& key, DiskLoc recordLoc, int& pos);
+ static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey);
};
class BtreeCursor : public Cursor {
- friend class BtreeBucket;
- BSONObj startKey;
- BSONObj endKey;
+ friend class BtreeBucket;
+ BSONObj startKey;
+ BSONObj endKey;
// BSONObj query; // the query we are working on in association with the cursor -- see noMoreMatches()
public:
- BtreeCursor(IndexDetails&, const BSONObj& startKey, int direction, BSONObj& query);
- virtual bool ok() { return !bucket.isNull(); }
- bool eof() { return !ok(); }
- virtual bool advance();
-
- virtual void noteLocation(); // updates keyAtKeyOfs...
- virtual void checkLocation();
-
- _KeyNode& _currKeyNode() {
- assert( !bucket.isNull() );
- _KeyNode& kn = bucket.btree()->k(keyOfs);
- assert( kn.isUsed() );
- return kn;
- }
- KeyNode currKeyNode() {
- assert( !bucket.isNull() );
- return bucket.btree()->keyNode(keyOfs);
- }
- BSONObj currKey() { return currKeyNode().key; }
-
- virtual BSONObj indexKeyPattern() {
+ BtreeCursor(IndexDetails&, const BSONObj& startKey, int direction, BSONObj& query);
+ virtual bool ok() {
+ return !bucket.isNull();
+ }
+ bool eof() {
+ return !ok();
+ }
+ virtual bool advance();
+
+ virtual void noteLocation(); // updates keyAtKeyOfs...
+ virtual void checkLocation();
+
+ _KeyNode& _currKeyNode() {
+ assert( !bucket.isNull() );
+ _KeyNode& kn = bucket.btree()->k(keyOfs);
+ assert( kn.isUsed() );
+ return kn;
+ }
+ KeyNode currKeyNode() {
+ assert( !bucket.isNull() );
+ return bucket.btree()->keyNode(keyOfs);
+ }
+ BSONObj currKey() {
+ return currKeyNode().key;
+ }
+
+ virtual BSONObj indexKeyPattern() {
return indexDetails.keyPattern();
}
- virtual void aboutToDeleteBucket(const DiskLoc& b) {
- if( bucket == b )
- keyOfs = -1;
- }
+ virtual void aboutToDeleteBucket(const DiskLoc& b) {
+ if ( bucket == b )
+ keyOfs = -1;
+ }
- virtual DiskLoc currLoc() { return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc(); }
- virtual Record* _current() { return currLoc().rec(); }
- virtual BSONObj current() { return BSONObj(_current()); }
- virtual string toString() {
- string s = string("BtreeCursor ") + indexDetails.indexName();
- if( direction < 0 ) s += " reverse";
+ virtual DiskLoc currLoc() {
+ return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc();
+ }
+ virtual Record* _current() {
+ return currLoc().rec();
+ }
+ virtual BSONObj current() {
+ return BSONObj(_current());
+ }
+ virtual string toString() {
+ string s = string("BtreeCursor ") + indexDetails.indexName();
+ if ( direction < 0 ) s += " reverse";
return s;
}
private:
- void findExtremeKeys( const BSONObj &query );
- void findExtremeInequalityValues( const BSONElement &e,
- BSONElement &lowest,
- BSONElement &highest );
- static void getFields( const BSONObj &key, set< string > &fields );
- void checkUnused();
- void checkEnd();
- IndexDetails& indexDetails;
- DiskLoc bucket;
- int keyOfs;
- int direction; // 1=fwd,-1=reverse
- BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call
- DiskLoc locAtKeyOfs;
+ void findExtremeKeys( const BSONObj &query );
+ void findExtremeInequalityValues( const BSONElement &e,
+ BSONElement &lowest,
+ BSONElement &highest );
+ static void getFields( const BSONObj &key, set< string > &fields );
+ void checkUnused();
+ void checkEnd();
+ IndexDetails& indexDetails;
+ DiskLoc bucket;
+ int keyOfs;
+ int direction; // 1=fwd,-1=reverse
+ BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call
+ DiskLoc locAtKeyOfs;
};
#pragma pack(pop)
diff --git a/db/btreecursor.cpp b/db/btreecursor.cpp
index b30ff58d0c0..32a965465d4 100644
--- a/db/btreecursor.cpp
+++ b/db/btreecursor.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -26,207 +26,207 @@ extern int otherTraceLevel;
DiskLoc maxDiskLoc(0x7fffffff, 0x7fffffff);
DiskLoc minDiskLoc(0, 1);
-BtreeCursor::BtreeCursor(IndexDetails& _id, const BSONObj& k, int _direction, BSONObj& _query) :
+BtreeCursor::BtreeCursor(IndexDetails& _id, const BSONObj& k, int _direction, BSONObj& _query) :
// query(_query),
- indexDetails(_id),
- direction(_direction)
+ indexDetails(_id),
+ direction(_direction)
{
//otherTraceLevel = 999;
- bool found;
- if( otherTraceLevel >= 12 ) {
- if( otherTraceLevel >= 200 ) {
- cout << "::BtreeCursor() qtl>200. validating entire index." << endl;
- indexDetails.head.btree()->fullValidate(indexDetails.head);
- }
- else {
- cout << "BTreeCursor(). dumping head bucket" << endl;
- indexDetails.head.btree()->dump();
- }
- }
-
- findExtremeKeys( _query );
- if( !k.isEmpty() )
- startKey = k;
-
- bucket = indexDetails.head.btree()->
- locate(indexDetails.head, startKey, keyOfs, found, direction > 0 ? minDiskLoc : maxDiskLoc, direction);
-
- checkUnused();
+ bool found;
+ if ( otherTraceLevel >= 12 ) {
+ if ( otherTraceLevel >= 200 ) {
+ cout << "::BtreeCursor() qtl>200. validating entire index." << endl;
+ indexDetails.head.btree()->fullValidate(indexDetails.head);
+ }
+ else {
+ cout << "BTreeCursor(). dumping head bucket" << endl;
+ indexDetails.head.btree()->dump();
+ }
+ }
+
+ findExtremeKeys( _query );
+ if ( !k.isEmpty() )
+ startKey = k;
+
+ bucket = indexDetails.head.btree()->
+ locate(indexDetails.head, startKey, keyOfs, found, direction > 0 ? minDiskLoc : maxDiskLoc, direction);
+
+ checkUnused();
}
// Given a query, find the lowest and highest keys along our index that could
// potentially match the query. These lowest and highest keys will be mapped
// to startKey and endKey based on the value of direction.
-void BtreeCursor::findExtremeKeys( const BSONObj &query ) {
- BSONObjBuilder startBuilder;
- BSONObjBuilder endBuilder;
- set< string >fields;
- getFields( indexDetails.keyPattern(), fields );
- for( set<string>::iterator i = fields.begin(); i != fields.end(); ++i ) {
- const char * field = i->c_str();
- BSONElement k = indexDetails.keyPattern().getFieldDotted( field );
+void BtreeCursor::findExtremeKeys( const BSONObj &query ) {
+ BSONObjBuilder startBuilder;
+ BSONObjBuilder endBuilder;
+ set< string >fields;
+ getFields( indexDetails.keyPattern(), fields );
+ for ( set<string>::iterator i = fields.begin(); i != fields.end(); ++i ) {
+ const char * field = i->c_str();
+ BSONElement k = indexDetails.keyPattern().getFieldDotted( field );
// int number = (int) k.number(); // returns 0.0 if not numeric
// bool forward = ( ( number >= 0 ? 1 : -1 ) * direction > 0 );
- // Temporary, until btree supports directional indexes.
- bool forward = ( direction > 0 );
- BSONElement lowest = minKey.firstElement();
- BSONElement highest = maxKey.firstElement();
- BSONElement e = query.getFieldDotted( field );
- if ( !e.eoo() && e.type() != RegEx ) {
- if ( getGtLtOp( e ) == JSMatcher::Equality )
- lowest = highest = e;
- else
- findExtremeInequalityValues( e, lowest, highest );
- }
- startBuilder.appendAs( forward ? lowest : highest, "" );
- endBuilder.appendAs( forward ? highest : lowest, "" );
- }
- startKey = startBuilder.doneAndDecouple();
- endKey = endBuilder.doneAndDecouple();
+ // Temporary, until btree supports directional indexes.
+ bool forward = ( direction > 0 );
+ BSONElement lowest = minKey.firstElement();
+ BSONElement highest = maxKey.firstElement();
+ BSONElement e = query.getFieldDotted( field );
+ if ( !e.eoo() && e.type() != RegEx ) {
+ if ( getGtLtOp( e ) == JSMatcher::Equality )
+ lowest = highest = e;
+ else
+ findExtremeInequalityValues( e, lowest, highest );
+ }
+ startBuilder.appendAs( forward ? lowest : highest, "" );
+ endBuilder.appendAs( forward ? highest : lowest, "" );
+ }
+ startKey = startBuilder.doneAndDecouple();
+ endKey = endBuilder.doneAndDecouple();
}
// Find lowest and highest possible key values given all $gt, $gte, $lt, and
// $lte elements in e. The values of lowest and highest should be
// preinitialized, for example to minKey.firstElement() and maxKey.firstElement().
void BtreeCursor::findExtremeInequalityValues( const BSONElement &e,
- BSONElement &lowest,
- BSONElement &highest ) {
- BSONObjIterator i( e.embeddedObject() );
- while( 1 ) {
- BSONElement s = i.next();
- if ( s.eoo() )
- break;
- int op = s.getGtLtOp();
- if ( ( op == JSMatcher::LT || op == JSMatcher::LTE ) &&
- ( s.woCompare( highest, false ) < 0 ) )
- highest = s;
- else if ( ( op == JSMatcher::GT || op == JSMatcher::GTE ) &&
- ( s.woCompare( lowest, false ) > 0 ) )
- lowest = s;
- }
+ BSONElement &lowest,
+ BSONElement &highest ) {
+ BSONObjIterator i( e.embeddedObject() );
+ while ( 1 ) {
+ BSONElement s = i.next();
+ if ( s.eoo() )
+ break;
+ int op = s.getGtLtOp();
+ if ( ( op == JSMatcher::LT || op == JSMatcher::LTE ) &&
+ ( s.woCompare( highest, false ) < 0 ) )
+ highest = s;
+ else if ( ( op == JSMatcher::GT || op == JSMatcher::GTE ) &&
+ ( s.woCompare( lowest, false ) > 0 ) )
+ lowest = s;
+ }
}
// Expand all field names in key to use dotted notation.
void BtreeCursor::getFields( const BSONObj &key, set< string > &fields ) {
- BSONObjIterator i( key );
- while( 1 ) {
- BSONElement k = i.next();
- if( k.eoo() )
- break;
- bool addedSubfield = false;
- if( k.type() == Object ) {
- set< string > subFields;
- getFields( k.embeddedObject(), subFields );
- for( set< string >::iterator i = subFields.begin(); i != subFields.end(); ++i ) {
- addedSubfield = true;
- fields.insert( k.fieldName() + string( "." ) + *i );
- }
- }
- if ( !addedSubfield )
- fields.insert( k.fieldName() );
- }
+ BSONObjIterator i( key );
+ while ( 1 ) {
+ BSONElement k = i.next();
+ if ( k.eoo() )
+ break;
+ bool addedSubfield = false;
+ if ( k.type() == Object ) {
+ set< string > subFields;
+ getFields( k.embeddedObject(), subFields );
+ for ( set< string >::iterator i = subFields.begin(); i != subFields.end(); ++i ) {
+ addedSubfield = true;
+ fields.insert( k.fieldName() + string( "." ) + *i );
+ }
+ }
+ if ( !addedSubfield )
+ fields.insert( k.fieldName() );
+ }
}
/* skip unused keys. */
void BtreeCursor::checkUnused() {
- int u = 0;
- while( 1 ) {
- if( !ok() )
- break;
- BtreeBucket *b = bucket.btree();
- _KeyNode& kn = b->k(keyOfs);
- if( kn.isUsed() )
- break;
- bucket = b->advance(bucket, keyOfs, direction, "checkUnused");
- u++;
- }
- if( u > 10 )
- OCCASIONALLY log() << "btree unused skipped:" << u << '\n';
+ int u = 0;
+ while ( 1 ) {
+ if ( !ok() )
+ break;
+ BtreeBucket *b = bucket.btree();
+ _KeyNode& kn = b->k(keyOfs);
+ if ( kn.isUsed() )
+ break;
+ bucket = b->advance(bucket, keyOfs, direction, "checkUnused");
+ u++;
+ }
+ if ( u > 10 )
+ OCCASIONALLY log() << "btree unused skipped:" << u << '\n';
}
// Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
int sgn( int i ) {
- if( i == 0 )
- return 0;
- return i > 0 ? 1 : -1;
+ if ( i == 0 )
+ return 0;
+ return i > 0 ? 1 : -1;
}
// Check if the current key is beyond endKey.
void BtreeCursor::checkEnd() {
- if ( bucket.isNull() )
- return;
- int cmp = sgn( endKey.woCompare( currKey() ) );
- if ( cmp != 0 && cmp != direction )
- bucket = DiskLoc();
+ if ( bucket.isNull() )
+ return;
+ int cmp = sgn( endKey.woCompare( currKey() ) );
+ if ( cmp != 0 && cmp != direction )
+ bucket = DiskLoc();
}
-bool BtreeCursor::advance() {
- if( bucket.isNull() )
- return false;
- bucket = bucket.btree()->advance(bucket, keyOfs, direction, "BtreeCursor::advance");
- checkUnused();
- checkEnd();
- return !bucket.isNull();
+bool BtreeCursor::advance() {
+ if ( bucket.isNull() )
+ return false;
+ bucket = bucket.btree()->advance(bucket, keyOfs, direction, "BtreeCursor::advance");
+ checkUnused();
+ checkEnd();
+ return !bucket.isNull();
}
void BtreeCursor::noteLocation() {
- if( !eof() ) {
- BSONObj o = bucket.btree()->keyAt(keyOfs).copy();
- keyAtKeyOfs = o;
- locAtKeyOfs = bucket.btree()->k(keyOfs).recordLoc;
- }
+ if ( !eof() ) {
+ BSONObj o = bucket.btree()->keyAt(keyOfs).copy();
+ keyAtKeyOfs = o;
+ locAtKeyOfs = bucket.btree()->k(keyOfs).recordLoc;
+ }
}
-/* Since the last noteLocation(), our key may have moved around, and that old cached
- information may thus be stale and wrong (although often it is right). We check
+/* Since the last noteLocation(), our key may have moved around, and that old cached
+ information may thus be stale and wrong (although often it is right). We check
that here; if we have moved, we have to search back for where we were at.
- i.e., after operations on the index, the BtreeCursor's cached location info may
- be invalid. This function ensures validity, so you should call it before using
+ i.e., after operations on the index, the BtreeCursor's cached location info may
+ be invalid. This function ensures validity, so you should call it before using
the cursor if other writers have used the database since the last noteLocation
call.
*/
-void BtreeCursor::checkLocation() {
- if( eof() )
- return;
-
- if( keyOfs >= 0 ) {
- BtreeBucket *b = bucket.btree();
-
- assert( !keyAtKeyOfs.isEmpty() );
-
- // Note keyAt() returns an empty BSONObj if keyOfs is now out of range,
- // which is possible as keys may have been deleted.
- if( b->keyAt(keyOfs).woEqual(keyAtKeyOfs) &&
- b->k(keyOfs).recordLoc == locAtKeyOfs ) {
- if( !b->k(keyOfs).isUsed() ) {
- /* we were deleted but still exist as an unused
- marker key. advance.
- */
- checkUnused();
- }
- return;
- }
- }
-
- /* normally we don't get to here. when we do, old position is no longer
- valid and we must refind where we left off (which is expensive)
- */
-
- bool found;
-
- /* TODO: Switch to keep indexdetails and do idx.head! */
- bucket = indexDetails.head.btree()->locate(indexDetails.head, keyAtKeyOfs, keyOfs, found, locAtKeyOfs, direction);
- RARELY log() << " key seems to have moved in the index, refinding. found:" << found << endl;
- if( found )
- checkUnused();
+void BtreeCursor::checkLocation() {
+ if ( eof() )
+ return;
+
+ if ( keyOfs >= 0 ) {
+ BtreeBucket *b = bucket.btree();
+
+ assert( !keyAtKeyOfs.isEmpty() );
+
+ // Note keyAt() returns an empty BSONObj if keyOfs is now out of range,
+ // which is possible as keys may have been deleted.
+ if ( b->keyAt(keyOfs).woEqual(keyAtKeyOfs) &&
+ b->k(keyOfs).recordLoc == locAtKeyOfs ) {
+ if ( !b->k(keyOfs).isUsed() ) {
+ /* we were deleted but still exist as an unused
+ marker key. advance.
+ */
+ checkUnused();
+ }
+ return;
+ }
+ }
+
+ /* normally we don't get to here. when we do, old position is no longer
+ valid and we must refind where we left off (which is expensive)
+ */
+
+ bool found;
+
+ /* TODO: Switch to keep indexdetails and do idx.head! */
+ bucket = indexDetails.head.btree()->locate(indexDetails.head, keyAtKeyOfs, keyOfs, found, locAtKeyOfs, direction);
+ RARELY log() << " key seems to have moved in the index, refinding. found:" << found << endl;
+ if ( found )
+ checkUnused();
}
/* ----------------------------------------------------------------------------- */
-struct BtreeUnitTest {
- BtreeUnitTest() {
- assert( minDiskLoc.compare(maxDiskLoc) < 0 );
- }
+struct BtreeUnitTest {
+ BtreeUnitTest() {
+ assert( minDiskLoc.compare(maxDiskLoc) < 0 );
+ }
} btut;
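
findExtremeKeys()/findExtremeInequalityValues() above derive a [lowest, highest] bound per indexed field from the query's $gt/$gte/$lt/$lte operators (equality simply collapses both bounds to the same value), then swap the bounds into startKey/endKey according to scan direction. A simplified sketch of the bound derivation, with ints standing in for BSON elements and an enum for the comparison operators; none of these names are the real API:

#include <climits>
#include <iostream>
#include <map>
#include <utility>

enum Op { GT, GTE, LT, LTE };                    // the $-operators we care about

// Like findExtremeInequalityValues(): tighten [lowest, highest] using the
// inequality operators attached to one indexed field.
std::pair<int,int> extremes(const std::multimap<Op,int>& spec) {
    int lowest = INT_MIN, highest = INT_MAX;     // minKey / maxKey stand-ins
    for (const auto& s : spec) {
        if ((s.first == LT || s.first == LTE) && s.second < highest)
            highest = s.second;
        else if ((s.first == GT || s.first == GTE) && s.second > lowest)
            lowest = s.second;
    }
    return {lowest, highest};
}

int main() {
    // query on the indexed field: { x: { $gt: 3, $lt: 7 } }
    std::multimap<Op,int> q = {{GT,3},{LT,7}};
    auto [lo, hi] = extremes(q);
    int direction = -1;                          // reverse scan
    bool forward = direction > 0;
    int startKey = forward ? lo : hi;            // like startBuilder.appendAs(...)
    int endKey   = forward ? hi : lo;
    std::cout << "start=" << startKey << " end=" << endKey << "\n";  // start=7 end=3
}

With a reverse scan the highest bound becomes the start key and the lowest the end key, which is exactly the swap the two appendAs() calls perform.
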
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index 04ff705bad8..a26f08887ff 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -1,22 +1,22 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* clientcursor.cpp
- ClientCursor is a wrapper that represents a cursorid from our database
+ ClientCursor is a wrapper that represents a cursorid from our database
application's perspective.
Cursor -- and its derived classes -- are our internal cursors.
@@ -36,189 +36,191 @@ CCById clientCursorsById;
typedef multimap<DiskLoc, ClientCursor*> ByLoc;
ByLoc byLoc;
-unsigned byLocSize() {
+unsigned byLocSize() {
return byLoc.size();
}
-void ClientCursor::setLastLoc(DiskLoc L) {
- if( L == _lastLoc )
- return;
+void ClientCursor::setLastLoc(DiskLoc L) {
+ if ( L == _lastLoc )
+ return;
- if( !_lastLoc.isNull() ) {
- ByLoc::iterator i = kv_find(byLoc, _lastLoc, this);
- if( i != byLoc.end() )
- byLoc.erase(i);
- }
+ if ( !_lastLoc.isNull() ) {
+ ByLoc::iterator i = kv_find(byLoc, _lastLoc, this);
+ if ( i != byLoc.end() )
+ byLoc.erase(i);
+ }
- if( !L.isNull() )
- byLoc.insert( make_pair(L, this) );
- _lastLoc = L;
+ if ( !L.isNull() )
+ byLoc.insert( make_pair(L, this) );
+ _lastLoc = L;
}
/* ------------------------------------------- */
/* must call this when a btree node is updated */
-//void removedKey(const DiskLoc& btreeLoc, int keyPos) {
+//void removedKey(const DiskLoc& btreeLoc, int keyPos) {
//}
-/* todo: this implementation is incomplete. we use it as a prefix for dropDatabase, which
- works fine as the prefix will end with '.'. however, when used with drop and
- deleteIndexes, this could take out cursors that belong to something else -- if you
+/* todo: this implementation is incomplete. we use it as a prefix for dropDatabase, which
+ works fine as the prefix will end with '.'. however, when used with drop and
+ deleteIndexes, this could take out cursors that belong to something else -- if you
drop "foo", currently, this will kill cursors for "foobar".
*/
-void ClientCursor::invalidate(const char *nsPrefix) {
- vector<ClientCursor*> toDelete;
-
- int len = strlen(nsPrefix);
- assert( len > 0 && strchr(nsPrefix, '.') );
- for( ByLoc::iterator i = byLoc.begin(); i != byLoc.end(); ++i ) {
- ClientCursor *cc = i->second;
- if( strncmp(nsPrefix, cc->ns.c_str(), len) == 0 )
- toDelete.push_back(i->second);
- }
-
- for( vector<ClientCursor*>::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
- delete (*i);
+void ClientCursor::invalidate(const char *nsPrefix) {
+ vector<ClientCursor*> toDelete;
+
+ int len = strlen(nsPrefix);
+ assert( len > 0 && strchr(nsPrefix, '.') );
+ for ( ByLoc::iterator i = byLoc.begin(); i != byLoc.end(); ++i ) {
+ ClientCursor *cc = i->second;
+ if ( strncmp(nsPrefix, cc->ns.c_str(), len) == 0 )
+ toDelete.push_back(i->second);
+ }
+
+ for ( vector<ClientCursor*>::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
+ delete (*i);
}
-/* must call when a btree bucket going away.
- note this is potentially slow
+/* must call this when a btree bucket is going away.
+ note this is potentially slow
*/
-void aboutToDeleteBucket(const DiskLoc& b) {
- RARELY if( byLoc.size() > 70 ) {
- log() << "perf warning: byLoc.size=" << byLoc.size() << " in aboutToDeleteBucket\n";
- }
- for( ByLoc::iterator i = byLoc.begin(); i != byLoc.end(); i++ )
- i->second->c->aboutToDeleteBucket(b);
+void aboutToDeleteBucket(const DiskLoc& b) {
+ RARELY if ( byLoc.size() > 70 ) {
+ log() << "perf warning: byLoc.size=" << byLoc.size() << " in aboutToDeleteBucket\n";
+ }
+ for ( ByLoc::iterator i = byLoc.begin(); i != byLoc.end(); i++ )
+ i->second->c->aboutToDeleteBucket(b);
}
/* must call this on a delete so we clean up the cursors. */
-void aboutToDelete(const DiskLoc& dl) {
+void aboutToDelete(const DiskLoc& dl) {
ByLoc::iterator j = byLoc.lower_bound(dl);
ByLoc::iterator stop = byLoc.upper_bound(dl);
- if( j == stop )
+ if ( j == stop )
return;
assert( dbMutexInfo.isLocked() );
- vector<ClientCursor*> toAdvance;
+ vector<ClientCursor*> toAdvance;
- while( 1 ) {
+ while ( 1 ) {
toAdvance.push_back(j->second);
- WIN assert( j->first == dl );
+ WIN assert( j->first == dl );
++j;
- if( j == stop )
+ if ( j == stop )
break;
- }
+ }
wassert( toAdvance.size() < 5000 );
- for( vector<ClientCursor*>::iterator i = toAdvance.begin();
- i != toAdvance.end(); ++i )
- {
- Cursor *c = (*i)->c.get();
- DiskLoc tmp1 = c->currLoc();
- if( tmp1 != dl ) {
- /* this might indicate a failure to call ClientCursor::updateLocation() */
- problem() << "warning: cursor loc does not match byLoc position!" << endl;
- }
- c->checkLocation();
- if( c->tailing() ) {
- DEV cout << "killing cursor as we would have to advance it and it is tailable" << endl;
- delete *i;
- continue;
- }
- c->advance();
- DiskLoc newLoc = c->currLoc();
- if( newLoc.isNull() ) {
- // advanced to end -- delete cursor
- delete *i;
- }
- else {
- wassert( newLoc != dl );
- (*i)->updateLocation();
- }
- }
+ for ( vector<ClientCursor*>::iterator i = toAdvance.begin();
+ i != toAdvance.end(); ++i )
+ {
+ Cursor *c = (*i)->c.get();
+ DiskLoc tmp1 = c->currLoc();
+ if ( tmp1 != dl ) {
+ /* this might indicate a failure to call ClientCursor::updateLocation() */
+ problem() << "warning: cursor loc does not match byLoc position!" << endl;
+ }
+ c->checkLocation();
+ if ( c->tailing() ) {
+ DEV cout << "killing cursor as we would have to advance it and it is tailable" << endl;
+ delete *i;
+ continue;
+ }
+ c->advance();
+ DiskLoc newLoc = c->currLoc();
+ if ( newLoc.isNull() ) {
+ // advanced to end -- delete cursor
+ delete *i;
+ }
+ else {
+ wassert( newLoc != dl );
+ (*i)->updateLocation();
+ }
+ }
}
ClientCursor::~ClientCursor() {
- assert( pos != -2 );
- setLastLoc( DiskLoc() ); // removes us from bylocation multimap
- clientCursorsById.erase(cursorid);
- // defensive:
- (CursorId&) cursorid = -1;
- pos = -2;
+ assert( pos != -2 );
+ setLastLoc( DiskLoc() ); // removes us from bylocation multimap
+ clientCursorsById.erase(cursorid);
+ // defensive:
+ (CursorId&) cursorid = -1;
+ pos = -2;
}
-/* call when cursor's location changes so that we can update the
- cursorsbylocation map. if you are locked and internally iterating, only
+/* call when cursor's location changes so that we can update the
+ cursorsbylocation map. if you are locked and internally iterating, only
need to call when you are ready to "unlock".
*/
void ClientCursor::updateLocation() {
- assert( cursorid );
- DiskLoc cl = c->currLoc();
- if( lastLoc() == cl ) {
- //log() << "info: lastloc==curloc " << ns << '\n';
- return;
- }
- setLastLoc(cl);
- c->noteLocation();
+ assert( cursorid );
+ DiskLoc cl = c->currLoc();
+ if ( lastLoc() == cl ) {
+ //log() << "info: lastloc==curloc " << ns << '\n';
+ return;
+ }
+ setLastLoc(cl);
+ c->noteLocation();
}
int ctmLast = 0; // so we don't have to do find() which is a little slow very often.
-long long ClientCursor::allocCursorId() {
- long long x;
- int ctm = (int) curTimeMillis();
- while( 1 ) {
- x = (((long long)rand()) << 32);
- x = x | ctm | 0x80000000; // OR to make sure not zero
- if( ctm != ctmLast || ClientCursor::find(x, false) == 0 )
- break;
- }
- ctmLast = ctm;
- DEV cout << " alloccursorid " << x << endl;
- return x;
+long long ClientCursor::allocCursorId() {
+ long long x;
+ int ctm = (int) curTimeMillis();
+ while ( 1 ) {
+ x = (((long long)rand()) << 32);
+ x = x | ctm | 0x80000000; // OR to make sure not zero
+ if ( ctm != ctmLast || ClientCursor::find(x, false) == 0 )
+ break;
+ }
+ ctmLast = ctm;
+ DEV cout << " alloccursorid " << x << endl;
+ return x;
}
-class CursInspector : public SingleResultObjCursor {
- Cursor* clone() {
- return new CursInspector();
- }
- void fill() {
- b.append("byLocation_size", byLoc.size());
- b.append("clientCursors_size", clientCursorsById.size());
-/* todo update for new impl:
- stringstream ss;
- ss << '\n';
- int x = 40;
- DiskToCC::iterator it = clientCursorsByLocation.begin();
- while( it != clientCursorsByLocation.end() ) {
- DiskLoc dl = it->first;
- ss << dl.toString() << " -> \n";
- set<ClientCursor*>::iterator j = it->second.begin();
- while( j != it->second.end() ) {
- ss << " cid:" << j->second->cursorid << ' ' << j->second->ns << " pos:" << j->second->pos << " LL:" << j->second->lastLoc.toString();
- try {
- setClient(j->second->ns.c_str());
- Record *r = dl.rec();
- ss << " lwh:" << hex << r->lengthWithHeaders << " nxt:" << r->nextOfs << " prv:" << r->prevOfs << dec << ' ' << j->second->c->toString();
- if( r->nextOfs >= 0 && r->nextOfs < 16 )
- ss << " DELETED??? (!)";
- }
- catch(...) {
- ss << " EXCEPTION";
- }
- ss << "\n";
- j++;
- }
- if( --x <= 0 ) {
- ss << "only first 40 shown\n" << endl;
- break;
- }
- it++;
- }
- b.append("dump", ss.str().c_str());
-*/
- }
+class CursInspector : public SingleResultObjCursor {
+ Cursor* clone() {
+ return new CursInspector();
+ }
+ void fill() {
+ b.append("byLocation_size", byLoc.size());
+ b.append("clientCursors_size", clientCursorsById.size());
+ /* todo update for new impl:
+ stringstream ss;
+ ss << '\n';
+ int x = 40;
+ DiskToCC::iterator it = clientCursorsByLocation.begin();
+ while( it != clientCursorsByLocation.end() ) {
+ DiskLoc dl = it->first;
+ ss << dl.toString() << " -> \n";
+ set<ClientCursor*>::iterator j = it->second.begin();
+ while( j != it->second.end() ) {
+ ss << " cid:" << j->second->cursorid << ' ' << j->second->ns << " pos:" << j->second->pos << " LL:" << j->second->lastLoc.toString();
+ try {
+ setClient(j->second->ns.c_str());
+ Record *r = dl.rec();
+ ss << " lwh:" << hex << r->lengthWithHeaders << " nxt:" << r->nextOfs << " prv:" << r->prevOfs << dec << ' ' << j->second->c->toString();
+ if( r->nextOfs >= 0 && r->nextOfs < 16 )
+ ss << " DELETED??? (!)";
+ }
+ catch(...) {
+ ss << " EXCEPTION";
+ }
+ ss << "\n";
+ j++;
+ }
+ if( --x <= 0 ) {
+ ss << "only first 40 shown\n" << endl;
+ break;
+ }
+ it++;
+ }
+ b.append("dump", ss.str().c_str());
+ */
+ }
public:
- CursInspector() { reg("intr.cursors"); }
+ CursInspector() {
+ reg("intr.cursors");
+ }
} _ciproto;
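
allocCursorId() above builds an id from rand() in the high 32 bits and the current millisecond count in the low bits, OR'd with 0x80000000 so the id can never be zero; a map lookup is only needed when two ids are handed out within the same millisecond. A rough standalone sketch of the same scheme, using <chrono> and an unordered_set in place of curTimeMillis() and the clientCursorsById map:

#include <chrono>
#include <cstdlib>
#include <iostream>
#include <unordered_set>

static std::unordered_set<long long> liveIds;   // stand-in for clientCursorsById
static int ctmLast = 0;                         // last millisecond we allocated in

long long allocCursorId() {
    long long x;
    int ctm = (int) std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
    while (true) {
        x = ((long long) std::rand()) << 32;
        x |= (unsigned) ctm | 0x80000000u;      // OR in the time; the high bit keeps it non-zero
        // only bother checking for a clash if we already allocated this millisecond
        if (ctm != ctmLast || liveIds.count(x) == 0)
            break;
    }
    ctmLast = ctm;
    liveIds.insert(x);
    return x;
}

int main() {
    std::cout << allocCursorId() << "\n" << allocCursorId() << "\n";
}
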
diff --git a/db/clientcursor.h b/db/clientcursor.h
index 5437d067cca..a99fe77692a 100644
--- a/db/clientcursor.h
+++ b/db/clientcursor.h
@@ -1,15 +1,15 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -18,7 +18,7 @@
Cursor -- and its derived classes -- are our internal cursors.
- ClientCursor is a wrapper that represents a cursorid from our database
+ ClientCursor is a wrapper that represents a cursorid from our database
application's perspective.
*/
@@ -33,54 +33,56 @@ typedef map<CursorId, ClientCursor*> CCById;
extern CCById clientCursorsById;
class ClientCursor {
- friend class CursInspector;
- DiskLoc _lastLoc; // use getter and setter not this.
- static CursorId allocCursorId();
+ friend class CursInspector;
+ DiskLoc _lastLoc; // use getter and setter not this.
+ static CursorId allocCursorId();
public:
- ClientCursor() : cursorid( allocCursorId() ), pos(0) {
- clientCursorsById.insert( make_pair(cursorid, this) );
- }
- ~ClientCursor();
- const CursorId cursorid;
- string ns;
+ ClientCursor() : cursorid( allocCursorId() ), pos(0) {
+ clientCursorsById.insert( make_pair(cursorid, this) );
+ }
+ ~ClientCursor();
+ const CursorId cursorid;
+ string ns;
//BSONObj pattern; // the query object
- auto_ptr<JSMatcher> matcher;
- auto_ptr<Cursor> c;
- int pos;
- DiskLoc lastLoc() const { return _lastLoc; }
- void setLastLoc(DiskLoc);
- auto_ptr< set<string> > filter; // which fields query wants returned
- Message originalMessage; // this is effectively an auto ptr for data the matcher points to.
+ auto_ptr<JSMatcher> matcher;
+ auto_ptr<Cursor> c;
+ int pos;
+ DiskLoc lastLoc() const {
+ return _lastLoc;
+ }
+ void setLastLoc(DiskLoc);
+ auto_ptr< set<string> > filter; // which fields query wants returned
+ Message originalMessage; // this is effectively an auto ptr for data the matcher points to.
- /* Get rid of cursors for namespaces that begin with nsprefix.
- Used by drop, deleteIndexes, dropDatabase.
- */
- static void invalidate(const char *nsPrefix);
+ /* Get rid of cursors for namespaces that begin with nsprefix.
+ Used by drop, deleteIndexes, dropDatabase.
+ */
+ static void invalidate(const char *nsPrefix);
- static bool erase(CursorId id) {
- ClientCursor *cc = find(id);
- if( cc ) {
- delete cc;
- return true;
- }
- return false;
- }
+ static bool erase(CursorId id) {
+ ClientCursor *cc = find(id);
+ if ( cc ) {
+ delete cc;
+ return true;
+ }
+ return false;
+ }
- static ClientCursor* find(CursorId id, bool warn = true) {
- CCById::iterator it = clientCursorsById.find(id);
- if( it == clientCursorsById.end() ) {
- if( warn )
- OCCASIONALLY cout << "ClientCursor::find(): cursor not found in map " << id << " (ok after a drop)\n";
- return 0;
- }
- return it->second;
- }
+ static ClientCursor* find(CursorId id, bool warn = true) {
+ CCById::iterator it = clientCursorsById.find(id);
+ if ( it == clientCursorsById.end() ) {
+ if ( warn )
+ OCCASIONALLY cout << "ClientCursor::find(): cursor not found in map " << id << " (ok after a drop)\n";
+ return 0;
+ }
+ return it->second;
+ }
- /* call when cursor's location changes so that we can update the
- cursorsbylocation map. if you are locked and internally iterating, only
- need to call when you are ready to "unlock".
- */
- void updateLocation();
+ /* call when cursor's location changes so that we can update the
+ cursorsbylocation map. if you are locked and internally iterating, only
+ need to call when you are ready to "unlock".
+ */
+ void updateLocation();
- void cleanupByLocation(DiskLoc loc);
+ void cleanupByLocation(DiskLoc loc);
};
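
setLastLoc() and aboutToDelete() in clientcursor.cpp above keep a multimap from disk location to cursor, so deleting a record can find and advance exactly the cursors parked on it. A small standalone sketch of that bookkeeping, with int record ids playing the role of DiskLoc and a kv_find-style erase of one (key, value) pair; the types and the fake "advance" are simplifications, not the real behaviour:

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct MiniCursor { std::string name; int loc; };        // loc == -1 plays DiskLoc().isNull()

static std::multimap<int, MiniCursor*> byLoc;

// Like ClientCursor::setLastLoc(): move the cursor from its old slot to the new one.
void setLastLoc(MiniCursor& c, int L) {
    if (L == c.loc) return;
    if (c.loc != -1) {                       // erase exactly this (loc, cursor) pair
        auto range = byLoc.equal_range(c.loc);
        for (auto i = range.first; i != range.second; ++i)
            if (i->second == &c) { byLoc.erase(i); break; }
    }
    if (L != -1) byLoc.insert({L, &c});
    c.loc = L;
}

// Like aboutToDelete(): advance every cursor sitting on the record being removed.
void aboutToDelete(int dl) {
    std::vector<MiniCursor*> toAdvance;
    for (auto i = byLoc.lower_bound(dl); i != byLoc.upper_bound(dl); ++i)
        toAdvance.push_back(i->second);
    for (MiniCursor* c : toAdvance)
        setLastLoc(*c, dl + 1);              // pretend "advance" means the next record id
}

int main() {
    MiniCursor a{"a", -1}, b{"b", -1};
    setLastLoc(a, 5);
    setLastLoc(b, 5);
    aboutToDelete(5);                        // record 5 is going away
    std::cout << a.loc << " " << b.loc << "\n";   // both cursors moved off it: 6 6
}
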
diff --git a/db/cloner.cpp b/db/cloner.cpp
index 8c7894aab99..01a40542ae2 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -30,32 +30,32 @@
void ensureHaveIdIndex(const char *ns);
extern int port;
-class Cloner: boost::noncopyable {
- DBClientConnection conn;
- void copy(const char *from_ns, const char *to_ns, bool isindex, bool logForRepl,
- bool masterSameProcess, bool slaveOk);
- auto_ptr<DBClientCursor> createCursor(bool masterSameProcess, const char *ns, bool slaveOk);
+class Cloner: boost::noncopyable {
+ DBClientConnection conn;
+ void copy(const char *from_ns, const char *to_ns, bool isindex, bool logForRepl,
+ bool masterSameProcess, bool slaveOk);
+ auto_ptr<DBClientCursor> createCursor(bool masterSameProcess, const char *ns, bool slaveOk);
public:
- Cloner() { }
+ Cloner() { }
- /* slaveOk - if true it is ok if the source of the data is !ismaster.
+ /* slaveOk - if true it is ok if the source of the data is !ismaster.
*/
- bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk);
+ bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk);
};
/* for index info object:
{ "name" : "name_1" , "ns" : "foo.index3" , "key" : { "name" : 1.0 } }
- we need to fix up the value in the "ns" parameter so that the name prefix is correct on a
+ we need to fix up the value in the "ns" parameter so that the name prefix is correct on a
copy to a new name.
*/
-BSONObj fixindex(BSONObj o) {
+BSONObj fixindex(BSONObj o) {
BSONObjBuilder b;
BSONObjIterator i(o);
- while( i.more() ) {
+ while ( i.more() ) {
BSONElement e = i.next();
- if( e.eoo() )
+ if ( e.eoo() )
break;
- if( string("ns") == e.fieldName() ) {
+ if ( string("ns") == e.fieldName() ) {
uassert("bad ns field for index during dbcopy", e.type() == String);
const char *p = strchr(e.valuestr(), '.');
uassert("bad ns field for index during dbcopy [2]", p);
@@ -67,114 +67,114 @@ BSONObj fixindex(BSONObj o) {
}
BSONObj res= b.doneAndDecouple();
-/* if( mod ) {
- cout << "before: " << o.toString() << endl;
- o.dump();
- cout << "after: " << res.toString() << endl;
- res.dump();
- }*/
+ /* if( mod ) {
+ cout << "before: " << o.toString() << endl;
+ o.dump();
+ cout << "after: " << res.toString() << endl;
+ res.dump();
+ }*/
return res;
}
-/* copy the specified collection
+/* copy the specified collection
   isindex - if true, this is the system.indexes collection, in which case we do some transformation when copying.
*/
void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk) {
- auto_ptr<DBClientCursor> c;
+ auto_ptr<DBClientCursor> c;
{
dbtemprelease r;
c = createCursor( masterSameProcess, from_collection, slaveOk );
}
- assert( c.get() );
- while( 1 ) {
+ assert( c.get() );
+ while ( 1 ) {
{
dbtemprelease r;
- if( !c->more() )
+ if ( !c->more() )
break;
}
BSONObj tmp = c->next();
/* assure object is valid. note this will slow us down a good bit. */
- if( !tmp.valid() ) {
+ if ( !tmp.valid() ) {
cout << "skipping corrupt object from " << from_collection << '\n';
continue;
}
BSONObj js = tmp;
- if( isindex ) {
+ if ( isindex ) {
assert( strstr(from_collection, "system.indexes") );
js = fixindex(tmp);
}
- theDataFileMgr.insert(to_collection, (void*) js.objdata(), js.objsize());
- if( logForRepl )
+ theDataFileMgr.insert(to_collection, (void*) js.objdata(), js.objsize());
+ if ( logForRepl )
logOp("i", to_collection, js);
}
}
class DirectConnector : public DBClientCursor::Connector {
- virtual bool send( Message &toSend, Message &response, bool assertOk=true ) {
- DbResponse dbResponse;
- assembleResponse( toSend, dbResponse );
- assert( dbResponse.response );
- response = *dbResponse.response;
- return true;
- }
+ virtual bool send( Message &toSend, Message &response, bool assertOk=true ) {
+ DbResponse dbResponse;
+ assembleResponse( toSend, dbResponse );
+ assert( dbResponse.response );
+ response = *dbResponse.response;
+ return true;
+ }
};
auto_ptr< DBClientCursor > Cloner::createCursor( bool masterSameProcess, const char *ns, bool slaveOk ) {
- auto_ptr< DBClientCursor > c;
- if ( !masterSameProcess ) {
- c = auto_ptr<DBClientCursor>( conn.query(ns, emptyObj, 0, 0, 0, slaveOk ? Option_SlaveOk : 0) );
- } else {
- c = auto_ptr<DBClientCursor>( new DBClientCursor( new DirectConnector(), ns,
- emptyObj, 0, 0, 0, slaveOk ? Option_SlaveOk : 0 ) );
- c->init();
- }
- return c;
+ auto_ptr< DBClientCursor > c;
+ if ( !masterSameProcess ) {
+ c = auto_ptr<DBClientCursor>( conn.query(ns, emptyObj, 0, 0, 0, slaveOk ? Option_SlaveOk : 0) );
+ } else {
+ c = auto_ptr<DBClientCursor>( new DBClientCursor( new DirectConnector(), ns,
+ emptyObj, 0, 0, 0, slaveOk ? Option_SlaveOk : 0 ) );
+ c->init();
+ }
+ return c;
}
-bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk) {
- string todb = database->name;
+bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk) {
+ string todb = database->name;
stringstream a,b;
a << "localhost:" << port;
b << "127.0.0.1:" << port;
- bool masterSameProcess = ( a.str() == masterHost || b.str() == masterHost );
- if( masterSameProcess ) {
- if( fromdb == todb && database->path == dbpath ) {
+ bool masterSameProcess = ( a.str() == masterHost || b.str() == masterHost );
+ if ( masterSameProcess ) {
+ if ( fromdb == todb && database->path == dbpath ) {
// guard against an "infinite" loop
/* if you are replicating, the local.sources config may be wrong if you get this */
errmsg = "can't clone from self (localhost).";
return false;
}
- }
+ }
    /* todo: we can put these releases inside dbclient or a dbclient specialization.
- or just wait until we get rid of global lock anyway.
+ or just wait until we get rid of global lock anyway.
*/
- string ns = fromdb + ".system.namespaces";
- auto_ptr<DBClientCursor> c;
+ string ns = fromdb + ".system.namespaces";
+ auto_ptr<DBClientCursor> c;
{
dbtemprelease r;
- if ( !masterSameProcess )
- if( !conn.connect(masterHost, errmsg) )
- return false;
- c = createCursor( masterSameProcess, ns.c_str(), slaveOk );
+ if ( !masterSameProcess )
+ if ( !conn.connect(masterHost, errmsg) )
+ return false;
+ c = createCursor( masterSameProcess, ns.c_str(), slaveOk );
+ }
+ if ( c.get() == 0 ) {
+ errmsg = "query failed " + ns;
+ return false;
}
- if( c.get() == 0 ) {
- errmsg = "query failed " + ns;
- return false;
- }
- while( 1 ) {
+ while ( 1 ) {
{
dbtemprelease r;
- if( !c->more() )
+ if ( !c->more() )
break;
}
- BSONObj collection = c->next();
- BSONElement e = collection.findElement("name");
- if( e.eoo() ) {
+ BSONObj collection = c->next();
+ BSONElement e = collection.findElement("name");
+ if ( e.eoo() ) {
string s = "bad system.namespaces object " + collection.toString();
/* temp
@@ -185,60 +185,62 @@ bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bo
massert(s.c_str(), false);
}
- assert( !e.eoo() );
- assert( e.type() == String );
- const char *from_name = e.valuestr();
- if( strstr(from_name, ".system.") || strchr(from_name, '$') ) {
- continue;
+ assert( !e.eoo() );
+ assert( e.type() == String );
+ const char *from_name = e.valuestr();
+ if ( strstr(from_name, ".system.") || strchr(from_name, '$') ) {
+ continue;
}
- BSONObj options = collection.getObjectField("options");
+ BSONObj options = collection.getObjectField("options");
/* change name "<fromdb>.collection" -> <todb>.collection */
const char *p = strchr(from_name, '.');
assert(p);
string to_name = todb + p;
- //if( !options.isEmpty() )
+ //if( !options.isEmpty() )
{
- string err;
+ string err;
const char *toname = to_name.c_str();
- userCreateNS(toname, options, err, logForRepl);
+ userCreateNS(toname, options, err, logForRepl);
- /* chunks are big enough that we should create the _id index up front, that should
- be faster. perhaps we should do that for everything? Not doing that yet -- not sure
+ /* chunks are big enough that we should create the _id index up front, that should
+ be faster. perhaps we should do that for everything? Not doing that yet -- not sure
how we want to handle _id-less collections, and we might not want to create the index
there.
*/
- if( strstr(toname, "._chunks") )
+ if ( strstr(toname, "._chunks") )
ensureHaveIdIndex(toname);
- }
- copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk);
- }
+ }
+ copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk);
+ }
- // now build the indexes
- string system_indexes_from = fromdb + ".system.indexes";
- string system_indexes_to = todb + ".system.indexes";
- copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk);
+ // now build the indexes
+ string system_indexes_from = fromdb + ".system.indexes";
+ string system_indexes_to = todb + ".system.indexes";
+ copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk);
- return true;
+ return true;
}
bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication, bool slaveOk)
{
- Cloner c;
- return c.go(masterHost, errmsg, fromdb, logForReplication, slaveOk);
+ Cloner c;
+ return c.go(masterHost, errmsg, fromdb, logForReplication, slaveOk);
}
/* Usage:
- mydb.$cmd.findOne( { clone: "fromhost" } );
+ mydb.$cmd.findOne( { clone: "fromhost" } );
*/
-class CmdClone : public Command {
+class CmdClone : public Command {
public:
- virtual bool slaveOk() { return false; }
+ virtual bool slaveOk() {
+ return false;
+ }
CmdClone() : Command("clone") { }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string from = cmdObj.getStringField("clone");
- if( from.empty() )
+ if ( from.empty() )
return false;
/* replication note: we must logOp() not the command, but the cloned data -- if the slave
were to clone it would get a different point-in-time and not match.
@@ -250,14 +252,18 @@ public:
/* Usage:
admindb.$cmd.findOne( { copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db> } );
*/
-class CmdCopyDb : public Command {
+class CmdCopyDb : public Command {
public:
CmdCopyDb() : Command("copydb") { }
- virtual bool adminOnly() { return true; }
- virtual bool slaveOk() { return false; }
+ virtual bool adminOnly() {
+ return true;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string fromhost = cmdObj.getStringField("fromhost");
- if( fromhost.empty() ) {
+ if ( fromhost.empty() ) {
/* copy from self */
stringstream ss;
ss << "localhost:" << port;
@@ -265,7 +271,7 @@ public:
}
string fromdb = cmdObj.getStringField("fromdb");
string todb = cmdObj.getStringField("todb");
- if( fromhost.empty() || todb.empty() || fromdb.empty() ) {
+ if ( fromhost.empty() || todb.empty() || fromdb.empty() ) {
errmsg = "parms missing - {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>}";
return false;
}
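
fixindex() above rewrites the "ns" value of each copied index spec so the database prefix matches the destination while the ".collection" suffix is kept. A simplified sketch of that rename, operating on a plain std::map instead of a BSONObj, so the element-by-element rebuild the real code performs collapses into a single assignment (names here are illustrative):

#include <cassert>
#include <iostream>
#include <map>
#include <string>

// Stand-in for an index-info object such as
// { "name" : "name_1" , "ns" : "foo.index3" , "key" : { "name" : 1.0 } }
using Obj = std::map<std::string, std::string>;

// Like fixindex(): keep every field, but rewrite "<fromdb>.<coll>" -> "<todb>.<coll>".
Obj fixindex(Obj o, const std::string& todb) {
    auto it = o.find("ns");
    assert(it != o.end());                          // "bad ns field" uasserts in the real code
    std::string::size_type dot = it->second.find('.');
    assert(dot != std::string::npos);
    it->second = todb + it->second.substr(dot);     // reuse the ".collection" suffix
    return o;
}

int main() {
    Obj spec = {{"name", "name_1"}, {"ns", "foo.index3"}};
    std::cout << fixindex(spec, "bar")["ns"] << "\n";   // bar.index3
}
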
diff --git a/db/commands.cpp b/db/commands.cpp
index 73838b7ea50..7e5aa65cc8e 100644
--- a/db/commands.cpp
+++ b/db/commands.cpp
@@ -3,16 +3,16 @@
*/
/**
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -23,46 +23,46 @@
map<string,Command*> *commands;
-Command::Command(const char *_name) : name(_name) {
+Command::Command(const char *_name) : name(_name) {
// register ourself.
- if( commands == 0 )
+ if ( commands == 0 )
commands = new map<string,Command*>;
(*commands)[name] = this;
}
-bool runCommandAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder) {
- const char *p = strchr(ns, '.');
- if( !p ) return false;
- if( strcmp(p, ".$cmd") != 0 ) return false;
+bool runCommandAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder) {
+ const char *p = strchr(ns, '.');
+ if ( !p ) return false;
+ if ( strcmp(p, ".$cmd") != 0 ) return false;
- bool ok = false;
- bool valid = false;
+ bool ok = false;
+ bool valid = false;
- BSONElement e;
- e = jsobj.firstElement();
+ BSONElement e;
+ e = jsobj.firstElement();
map<string,Command*>::iterator i;
- if( e.eoo() )
+ if ( e.eoo() )
;
- /* check for properly registered command objects. Note that all the commands below should be
+ /* check for properly registered command objects. Note that all the commands below should be
migrated over to the command object format.
*/
- else if( (i = commands->find(e.fieldName())) != commands->end() ) {
+ else if ( (i = commands->find(e.fieldName())) != commands->end() ) {
valid = true;
string errmsg;
Command *c = i->second;
- if( c->adminOnly() && strncmp(ns, "admin", p-ns) != 0 ) {
+ if ( c->adminOnly() && strncmp(ns, "admin", p-ns) != 0 ) {
ok = false;
- errmsg = "access denied";
+ errmsg = "access denied";
}
else {
ok = c->run(ns, jsobj, errmsg, anObjBuilder, false);
}
- if( !ok )
+ if ( !ok )
anObjBuilder.append("errmsg", errmsg);
return true;
}
-
- return false;
+
+ return false;
}
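
runCommandAgainstRegistered() above dispatches on two things: the namespace must end in ".$cmd", and the first field name of the command object selects the handler from the registry (plus an admin-only check on the database prefix). A compact sketch of the same flow against a toy registry; std::function handlers and a plain string stand in for the Command objects and the BSON first-element lookup:

#include <cstring>
#include <functional>
#include <iostream>
#include <map>
#include <string>

// name -> handler returning ok and possibly setting errmsg; simplified from map<string,Command*>
static std::map<std::string, std::function<bool(std::string&)>> registry = {
    {"clone",  [](std::string&)     { return true; }},
    {"copydb", [](std::string& err) { err = "access denied"; return false; }},
};

bool runAgainstRegistered(const char* ns, const std::string& firstField, std::string& errmsg) {
    const char* p = std::strchr(ns, '.');
    if (!p || std::strcmp(p, ".$cmd") != 0)       // only <db>.$cmd namespaces qualify
        return false;
    auto i = registry.find(firstField);
    if (i == registry.end())
        return false;                             // not a registered command
    bool ok = i->second(errmsg);
    if (!ok)
        std::cout << "errmsg: " << errmsg << "\n";
    return true;                                  // a registered command handled it
}

int main() {
    std::string err;
    runAgainstRegistered("mydb.$cmd", "clone",  err);   // handled, succeeds
    runAgainstRegistered("mydb.$cmd", "copydb", err);   // handled, reports errmsg
    runAgainstRegistered("mydb.foo",  "clone",  err);   // ignored: not a $cmd namespace
}
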
diff --git a/db/commands.h b/db/commands.h
index 24418da8e4b..0c2a54a0dd7 100644
--- a/db/commands.h
+++ b/db/commands.h
@@ -1,16 +1,16 @@
// commands.h
/**
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -22,14 +22,14 @@ class BSONObjBuilder;
// db "commands" (sent via db.$cmd.findOne(...))
// subclass to make a command.
-class Command {
+class Command {
public:
string name;
- /* run the given command
+ /* run the given command
implement this...
- fromRepl - command is being invoked as part of replication syncing. In this situation you
+ fromRepl - command is being invoked as part of replication syncing. In this situation you
normally do not want to log the command to the local oplog.
return value is true if succeeded. if false, set errmsg text.
@@ -37,19 +37,23 @@ public:
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) = 0;
/* Return true if only the admin ns has privileges to run this command. */
- virtual bool adminOnly() { return false; }
+ virtual bool adminOnly() {
+ return false;
+ }
- /* Return true if slaves of a replication pair are allowed to execute the command
+ /* Return true if slaves of a replication pair are allowed to execute the command
(the command directly from a client -- if fromRepl, always allowed).
*/
virtual bool slaveOk() = 0;
- /* Override and return true to if true,log the operation (logOp()) to the replication log.
+    /* Override and return true to log the operation (logOp()) to the replication log.
(not done if fromRepl of course)
Note if run() returns false, we do NOT log.
*/
- virtual bool logTheOp() { return false; }
+ virtual bool logTheOp() {
+ return false;
+ }
Command(const char *_name);
};
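
A Command registers itself from its constructor, so a file-scope instance of each subclass (CmdClone, CmdCopyDb, ...) is all it takes for the name to show up in the dispatch map. A minimal sketch of that self-registration idiom with a toy base class; the names below are stand-ins, not the real ones:

#include <iostream>
#include <map>
#include <string>

struct Cmd;                                        // toy stand-in for Command
static std::map<std::string, Cmd*>* commands;      // created lazily, as in commands.cpp

struct Cmd {
    std::string name;
    Cmd(const char* n) : name(n) {                 // register ourself on construction
        if (commands == 0)
            commands = new std::map<std::string, Cmd*>;
        (*commands)[name] = this;
    }
    virtual bool adminOnly() { return false; }     // same defaults as the real base class
    virtual bool slaveOk() = 0;                    // subclasses must decide this
    virtual ~Cmd() {}
};

// A concrete command: one static instance is enough to make it dispatchable.
struct CmdPing : Cmd {
    CmdPing() : Cmd("ping") {}
    bool slaveOk() { return true; }
} cmdPing;

int main() {
    std::cout << commands->size() << " command(s) registered, first: "
              << commands->begin()->first << "\n";  // 1 command(s) registered, first: ping
}
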
diff --git a/db/cursor.h b/db/cursor.h
index ecd417ed433..32aa5a3b4ad 100644
--- a/db/cursor.h
+++ b/db/cursor.h
@@ -1,15 +1,15 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -18,130 +18,159 @@
#include "../stdafx.h"
-/* Query cursors, base class. This is for our internal cursors. "ClientCursor" is a separate
+/* Query cursors, base class. This is for our internal cursors. "ClientCursor" is a separate
concept and is for the user's cursor.
*/
class Cursor {
public:
- virtual bool ok() = 0;
- bool eof() { return !ok(); }
- virtual Record* _current() = 0;
- virtual BSONObj current() = 0;
- virtual DiskLoc currLoc() = 0;
- virtual bool advance() = 0; /*true=ok*/
-
- /* Implement these if you want the cursor to be "tailable" */
- /* tailable(): if true, cursor has tailable capability AND
- the user requested use of those semantics. */
- virtual bool tailable() { return false; }
- /* indicates we should mark where we are and go into tail mode. */
- virtual void setAtTail() { assert(false); }
- /* you must call tailResume before reusing the cursor */
- virtual void tailResume() { }
- /* indicates ifi we are actively tailing. once it goes active,
- this should return treu even after tailResume(). */
- virtual bool tailing() { return false; }
-
- virtual void aboutToDeleteBucket(const DiskLoc& b) { }
-
- /* optional to implement. if implemented, means 'this' is a prototype */
- virtual Cursor* clone() { return 0; }
-
- virtual BSONObj indexKeyPattern() { return BSONObj(); }
-
- /* called after every query block is iterated -- i.e. between getMore() blocks
- so you can note where we are, if necessary.
- */
- virtual void noteLocation() { }
-
- /* called before query getmore block is iterated */
- virtual void checkLocation() { }
-
- virtual string toString() { return "abstract?"; }
-
- /* used for multikey index traversal to avoid sending back dups. see JSMatcher::matches() */
- set<DiskLoc> dups;
- bool getsetdup(DiskLoc loc) {
- /* to save mem only call this when there is risk of dups (e.g. when 'deep'/multikey) */
- if( dups.count(loc) > 0 )
- return true;
- dups.insert(loc);
- return false;
- }
+ virtual bool ok() = 0;
+ bool eof() {
+ return !ok();
+ }
+ virtual Record* _current() = 0;
+ virtual BSONObj current() = 0;
+ virtual DiskLoc currLoc() = 0;
+ virtual bool advance() = 0; /*true=ok*/
+
+ /* Implement these if you want the cursor to be "tailable" */
+ /* tailable(): if true, cursor has tailable capability AND
+ the user requested use of those semantics. */
+ virtual bool tailable() {
+ return false;
+ }
+ /* indicates we should mark where we are and go into tail mode. */
+ virtual void setAtTail() {
+ assert(false);
+ }
+ /* you must call tailResume before reusing the cursor */
+ virtual void tailResume() { }
+    /* indicates if we are actively tailing. once it goes active,
+       this should return true even after tailResume(). */
+ virtual bool tailing() {
+ return false;
+ }
+
+ virtual void aboutToDeleteBucket(const DiskLoc& b) { }
+
+ /* optional to implement. if implemented, means 'this' is a prototype */
+ virtual Cursor* clone() {
+ return 0;
+ }
+
+ virtual BSONObj indexKeyPattern() {
+ return BSONObj();
+ }
+
+ /* called after every query block is iterated -- i.e. between getMore() blocks
+ so you can note where we are, if necessary.
+ */
+ virtual void noteLocation() { }
+
+ /* called before query getmore block is iterated */
+ virtual void checkLocation() { }
+
+ virtual string toString() {
+ return "abstract?";
+ }
+
+ /* used for multikey index traversal to avoid sending back dups. see JSMatcher::matches() */
+ set<DiskLoc> dups;
+ bool getsetdup(DiskLoc loc) {
+ /* to save mem only call this when there is risk of dups (e.g. when 'deep'/multikey) */
+ if ( dups.count(loc) > 0 )
+ return true;
+ dups.insert(loc);
+ return false;
+ }
};
/* table-scan style cursor */
class BasicCursor : public Cursor {
protected:
- DiskLoc curr, last;
+ DiskLoc curr, last;
private:
- // for tailing:
- enum State { Normal, TailPoint, TailResumed } state;
- void init() { state = Normal; }
+ // for tailing:
+ enum State { Normal, TailPoint, TailResumed } state;
+ void init() {
+ state = Normal;
+ }
public:
- bool ok() { return !curr.isNull(); }
- Record* _current() {
- assert( ok() );
- return curr.rec();
- }
- BSONObj current() {
- Record *r = _current();
- BSONObj j(r);
- return j;
- }
- virtual DiskLoc currLoc() { return curr; }
-
- bool advance() {
- if( eof() )
- return false;
- Record *r = _current();
- last = curr;
- curr = r->getNext(curr);
- return ok();
- }
-
- BasicCursor(DiskLoc dl) : curr(dl) { init(); }
- BasicCursor() { init(); }
- virtual string toString() { return "BasicCursor"; }
-
- virtual void tailResume() {
- if( state == TailPoint ) {
- state = TailResumed;
- advance();
- }
- }
- virtual void setAtTail() {
- assert( state != TailPoint );
- assert( curr.isNull() );
- assert( !last.isNull() );
- curr = last; last.Null();
- state = TailPoint;
- }
- virtual bool tailable() {
- // to go into tail mode we need a non-null point of reference for resumption
- return !last.isNull();
- }
- virtual bool tailing() {
- return state != Normal;
- }
+ bool ok() {
+ return !curr.isNull();
+ }
+ Record* _current() {
+ assert( ok() );
+ return curr.rec();
+ }
+ BSONObj current() {
+ Record *r = _current();
+ BSONObj j(r);
+ return j;
+ }
+ virtual DiskLoc currLoc() {
+ return curr;
+ }
+
+ bool advance() {
+ if ( eof() )
+ return false;
+ Record *r = _current();
+ last = curr;
+ curr = r->getNext(curr);
+ return ok();
+ }
+
+ BasicCursor(DiskLoc dl) : curr(dl) {
+ init();
+ }
+ BasicCursor() {
+ init();
+ }
+ virtual string toString() {
+ return "BasicCursor";
+ }
+
+ virtual void tailResume() {
+ if ( state == TailPoint ) {
+ state = TailResumed;
+ advance();
+ }
+ }
+ virtual void setAtTail() {
+ assert( state != TailPoint );
+ assert( curr.isNull() );
+ assert( !last.isNull() );
+ curr = last;
+ last.Null();
+ state = TailPoint;
+ }
+ virtual bool tailable() {
+ // to go into tail mode we need a non-null point of reference for resumption
+ return !last.isNull();
+ }
+ virtual bool tailing() {
+ return state != Normal;
+ }
};
/* used for order { $natural: -1 } */
class ReverseCursor : public BasicCursor {
public:
- bool advance() {
- if( eof() )
- return false;
- Record *r = _current();
- last = curr;
- curr = r->getPrev(curr);
- return ok();
- }
-
- ReverseCursor(DiskLoc dl) : BasicCursor(dl) { }
- ReverseCursor() { }
- virtual string toString() { return "ReverseCursor"; }
+ bool advance() {
+ if ( eof() )
+ return false;
+ Record *r = _current();
+ last = curr;
+ curr = r->getPrev(curr);
+ return ok();
+ }
+
+ ReverseCursor(DiskLoc dl) : BasicCursor(dl) { }
+ ReverseCursor() { }
+ virtual string toString() {
+ return "ReverseCursor";
+ }
};
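
BasicCursor's tail support above is a small state machine (Normal -> TailPoint -> TailResumed): a cursor that runs off the end of the collection while it still has a last position can park at the tail, and tailResume() steps it forward once new records exist. A stripped-down sketch over a std::vector "collection", with indices standing in for DiskLocs and -1 for the null location (all simplifications, not the real storage layer):

#include <iostream>
#include <string>
#include <vector>

struct TailCursor {
    enum State { Normal, TailPoint, TailResumed } state = Normal;
    const std::vector<std::string>* coll = nullptr;
    int curr = 0, last = -1;                  // -1 plays DiskLoc().isNull()

    bool ok() const { return curr != -1 && curr < (int) coll->size(); }
    bool advance() {
        if (!ok()) return false;
        last = curr;
        curr = (curr + 1 < (int) coll->size()) ? curr + 1 : -1;
        return ok();
    }
    bool tailable() const { return last != -1; }   // need a point of reference to resume from
    void setAtTail()  { curr = last; last = -1; state = TailPoint; }
    void tailResume() { if (state == TailPoint) { state = TailResumed; advance(); } }
};

int main() {
    std::vector<std::string> coll = {"a", "b"};
    TailCursor c;
    c.coll = &coll;
    while (c.advance()) {}                    // drain the collection
    if (c.tailable()) c.setAtTail();          // park on the last record we saw
    coll.push_back("c");                      // a writer appends a new record
    c.tailResume();                           // the cursor moves onto it
    std::cout << (*c.coll)[c.curr] << "\n";   // prints "c"
}
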
diff --git a/db/database.h b/db/database.h
index 898e371a76e..dc329435a98 100644
--- a/db/database.h
+++ b/db/database.h
@@ -2,113 +2,115 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
-/* Database represents a database database
+/* Database represents a single database.
   Each database has its own set of files -- dbname.ns, dbname.0, dbname.1, ...
*/
-class Database {
+class Database {
public:
- Database(const char *nm, bool& justCreated, const char *_path = dbpath) :
- name(nm),
- path(_path) {
- assert( !string( nm ).empty() );
- justCreated = namespaceIndex.init(_path, nm);
- profile = 0;
- profileName = name + ".system.profile";
- }
- ~Database() {
- int n = files.size();
- for( int i = 0; i < n; i++ )
- delete files[i];
- }
+ Database(const char *nm, bool& justCreated, const char *_path = dbpath) :
+ name(nm),
+ path(_path) {
+ assert( !string( nm ).empty() );
+ justCreated = namespaceIndex.init(_path, nm);
+ profile = 0;
+ profileName = name + ".system.profile";
+ }
+ ~Database() {
+ int n = files.size();
+ for ( int i = 0; i < n; i++ )
+ delete files[i];
+ }
- PhysicalDataFile* getFile(int n) {
- assert(this);
+ PhysicalDataFile* getFile(int n) {
+ assert(this);
- if( n < 0 || n >= DiskLoc::MaxFiles ) {
- cout << "getFile(): n=" << n << endl;
- assert( n >= 0 && n < DiskLoc::MaxFiles );
- }
- DEV {
- if( n > 100 )
- cout << "getFile(): n=" << n << "?" << endl;
- }
- while( n >= (int) files.size() )
- files.push_back(0);
- PhysicalDataFile* p = files[n];
- if( p == 0 ) {
- stringstream ss;
- ss << name << '.' << n;
- boost::filesystem::path fullName;
- fullName = boost::filesystem::path(path) / ss.str();
- string fullNameString = fullName.string();
- p = new PhysicalDataFile(n);
- try {
+ if ( n < 0 || n >= DiskLoc::MaxFiles ) {
+ cout << "getFile(): n=" << n << endl;
+ assert( n >= 0 && n < DiskLoc::MaxFiles );
+ }
+ DEV {
+ if ( n > 100 )
+ cout << "getFile(): n=" << n << "?" << endl;
+ }
+ while ( n >= (int) files.size() )
+ files.push_back(0);
+ PhysicalDataFile* p = files[n];
+ if ( p == 0 ) {
+ stringstream ss;
+ ss << name << '.' << n;
+ boost::filesystem::path fullName;
+ fullName = boost::filesystem::path(path) / ss.str();
+ string fullNameString = fullName.string();
+ p = new PhysicalDataFile(n);
+ try {
p->open(n, fullNameString.c_str() );
- }
- catch( AssertionException& u ) {
+ }
+ catch ( AssertionException& u ) {
delete p;
throw u;
}
- files[n] = p;
- }
- return p;
- }
+ files[n] = p;
+ }
+ return p;
+ }
- PhysicalDataFile* addAFile() {
- int n = (int) files.size();
- return getFile(n);
- }
+ PhysicalDataFile* addAFile() {
+ int n = (int) files.size();
+ return getFile(n);
+ }
- PhysicalDataFile* suitableFile(int sizeNeeded) {
+ PhysicalDataFile* suitableFile(int sizeNeeded) {
PhysicalDataFile* f = newestFile();
- for( int i = 0; i < 8; i++ ) {
- if( f->getHeader()->unusedLength >= sizeNeeded )
+ for ( int i = 0; i < 8; i++ ) {
+ if ( f->getHeader()->unusedLength >= sizeNeeded )
break;
f = addAFile();
- if( f->getHeader()->fileLength > 1500000000 ) // this is as big as they get so might as well stop
+ if ( f->getHeader()->fileLength > 1500000000 ) // this is as big as they get so might as well stop
break;
}
return f;
}
- PhysicalDataFile* newestFile() {
- int n = (int) files.size();
- if( n > 0 ) n--;
- return getFile(n);
- }
+ PhysicalDataFile* newestFile() {
+ int n = (int) files.size();
+ if ( n > 0 ) n--;
+ return getFile(n);
+ }
void finishInit(); // ugly...
- vector<PhysicalDataFile*> files;
- string name; // "alleyinsider"
- string path;
- NamespaceIndex namespaceIndex;
- int profile; // 0=off.
- string profileName; // "alleyinsider.system.profile"
+ vector<PhysicalDataFile*> files;
+ string name; // "alleyinsider"
+ string path;
+ NamespaceIndex namespaceIndex;
+ int profile; // 0=off.
+ string profileName; // "alleyinsider.system.profile"
QueryOptimizer optimizer;
- bool haveLogged() { return _haveLogged; }
+ bool haveLogged() {
+ return _haveLogged;
+ }
void setHaveLogged();
private:
// see dbinfo.h description. if true, we have logged to the replication log.
- bool _haveLogged;
+ bool _haveLogged;
};
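
Database above opens data files named "<dbname>.<n>" lazily, growing a vector of PhysicalDataFile pointers on demand, and suitableFile() walks forward adding files until one has enough free space or the ~1.5 GB per-file ceiling is hit. A toy sketch of that growth policy with an in-memory struct standing in for PhysicalDataFile (the doubling size rule below is an illustrative assumption, not the real allocation logic):

#include <iostream>
#include <string>
#include <vector>

struct ToyFile {                               // stand-in for PhysicalDataFile
    std::string name;
    long long unusedLength;
    long long fileLength;
};

struct ToyDatabase {
    std::string name = "alleyinsider";
    std::vector<ToyFile*> files;               // leaked in this sketch; the real dtor deletes them

    ToyFile* getFile(int n) {                  // lazily create "<dbname>.<n>"
        while (n >= (int) files.size())
            files.push_back(0);
        if (files[n] == 0) {
            long long len = 64LL << 20 << n;   // pretend each file doubles in size
            files[n] = new ToyFile{name + "." + std::to_string(n), len, len};
        }
        return files[n];
    }
    ToyFile* addAFile()   { return getFile((int) files.size()); }
    ToyFile* newestFile() { return getFile(files.empty() ? 0 : (int) files.size() - 1); }

    ToyFile* suitableFile(long long sizeNeeded) {
        ToyFile* f = newestFile();
        for (int i = 0; i < 8; i++) {          // bounded search, like the real loop
            if (f->unusedLength >= sizeNeeded)
                break;
            f = addAFile();
            if (f->fileLength > 1500000000)    // "as big as they get so might as well stop"
                break;
        }
        return f;
    }
};

int main() {
    ToyDatabase db;
    db.getFile(0)->unusedLength = 0;           // the first file is full
    std::cout << db.suitableFile(10 << 20)->name << "\n";   // alleyinsider.1
}
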
diff --git a/db/db.cpp b/db/db.cpp
index caf1db073ad..fcd26343c1f 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -3,16 +3,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -47,62 +47,62 @@ void startReplication();
void pairWith(const char *remoteEnd, const char *arb);
struct MyStartupTests {
- MyStartupTests() {
- assert( sizeof(OID) == 12 );
- }
+ MyStartupTests() {
+ assert( sizeof(OID) == 12 );
+ }
} mystartupdbcpp;
-void quicktest() {
- cout << "quicktest()\n";
+void quicktest() {
+ cout << "quicktest()\n";
- MemoryMappedFile mmf;
- char *m = (char *) mmf.map("/tmp/quicktest", 16384);
- // cout << "mmf reads: " << m << endl;
- strcpy_s(m, 1000, "hello worldz");
+ MemoryMappedFile mmf;
+ char *m = (char *) mmf.map("/tmp/quicktest", 16384);
+ // cout << "mmf reads: " << m << endl;
+ strcpy_s(m, 1000, "hello worldz");
}
QueryResult* emptyMoreResult(long long);
void testTheDb() {
- stringstream ss;
+ stringstream ss;
- setClient("sys.unittest.pdfile");
+ setClient("sys.unittest.pdfile");
- /* this is not validly formatted, if you query this namespace bad things will happen */
- theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
- theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
+ /* this is not validly formatted, if you query this namespace bad things will happen */
+ theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
+ theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
- BSONObj j1((const char *) &js1);
- deleteObjects("sys.unittest.delete", j1, false);
- theDataFileMgr.insert("sys.unittest.delete", &js1, sizeof(js1));
- deleteObjects("sys.unittest.delete", j1, false);
- updateObjects("sys.unittest.delete", j1, j1, true,ss);
- updateObjects("sys.unittest.delete", j1, j1, false,ss);
+ BSONObj j1((const char *) &js1);
+ deleteObjects("sys.unittest.delete", j1, false);
+ theDataFileMgr.insert("sys.unittest.delete", &js1, sizeof(js1));
+ deleteObjects("sys.unittest.delete", j1, false);
+ updateObjects("sys.unittest.delete", j1, j1, true,ss);
+ updateObjects("sys.unittest.delete", j1, j1, false,ss);
- auto_ptr<Cursor> c = theDataFileMgr.findAll("sys.unittest.pdfile");
- while( c->ok() ) {
- c->_current();
- c->advance();
- }
- cout << endl;
+ auto_ptr<Cursor> c = theDataFileMgr.findAll("sys.unittest.pdfile");
+ while ( c->ok() ) {
+ c->_current();
+ c->advance();
+ }
+ cout << endl;
- database = 0;
+ database = 0;
}
MessagingPort *grab = 0;
void connThread();
-class OurListener : public Listener {
+class OurListener : public Listener {
public:
- OurListener(int p) : Listener(p) { }
- virtual void accepted(MessagingPort *mp) {
- assert( grab == 0 );
- grab = mp;
- boost::thread thr(connThread);
- while( grab )
- sleepmillis(1);
- }
+ OurListener(int p) : Listener(p) { }
+ virtual void accepted(MessagingPort *mp) {
+ assert( grab == 0 );
+ grab = mp;
+ boost::thread thr(connThread);
+ while ( grab )
+ sleepmillis(1);
+ }
};
void webServerThread();
@@ -112,74 +112,74 @@ void pdfileInit();
114 bad memory bug fixed
115 replay, opLogging
*/
-void listen(int port) {
- const char *Version = "db version: 122";
- problem() << Version << endl;
- problem() << "pdfile version " << VERSION << "." << VERSION_MINOR << endl;
- pdfileInit();
- //testTheDb();
- log() << "waiting for connections on port " << port << "..." << endl;
- OurListener l(port);
- startReplication();
+void listen(int port) {
+ const char *Version = "db version: 122";
+ problem() << Version << endl;
+ problem() << "pdfile version " << VERSION << "." << VERSION_MINOR << endl;
+ pdfileInit();
+ //testTheDb();
+ log() << "waiting for connections on port " << port << "..." << endl;
+ OurListener l(port);
+ startReplication();
boost::thread thr(webServerThread);
- l.listen();
+ l.listen();
}
-class JniMessagingPort : public AbstractMessagingPort {
+class JniMessagingPort : public AbstractMessagingPort {
public:
- JniMessagingPort(Message& _container) : container(_container) { }
- void reply(Message& received, Message& response, MSGID) {
- container = response;
- }
- void reply(Message& received, Message& response) {
- container = response;
- }
- Message & container;
+ JniMessagingPort(Message& _container) : container(_container) { }
+ void reply(Message& received, Message& response, MSGID) {
+ container = response;
+ }
+ void reply(Message& received, Message& response) {
+ container = response;
+ }
+ Message & container;
};
-/* we create one thread for each connection from an app server database.
+/* we create one thread for each connection from an app server database.
app server will open a pool of threads.
*/
void connThread()
{
- try {
-
- MessagingPort& dbMsgPort = *grab;
- grab = 0;
-
- Message m;
- while( 1 ) {
- m.reset();
-
- if( !dbMsgPort.recv(m) ) {
- log() << "end connection " << dbMsgPort.farEnd.toString() << endl;
- dbMsgPort.shutdown();
- break;
- }
-
- DbResponse dbresponse;
- if( !assembleResponse( m, dbresponse ) ) {
- cout << curTimeMillis() % 10000 << " end msg " << dbMsgPort.farEnd.toString() << endl;
- if( dbMsgPort.farEnd.isLocalHost() ) {
- dbMsgPort.shutdown();
- sleepmillis(50);
- problem() << "exiting end msg" << endl;
- exit(EXIT_SUCCESS);
- }
- else {
- cout << " (not from localhost, ignoring end msg)" << endl;
- }
- }
-
- if( dbresponse.response )
- dbMsgPort.reply(m, *dbresponse.response, dbresponse.responseTo);
- }
-
- }
- catch( AssertionException& ) {
- problem() << "Uncaught AssertionException, terminating" << endl;
- exit(15);
- }
+ try {
+
+ MessagingPort& dbMsgPort = *grab;
+ grab = 0;
+
+ Message m;
+ while ( 1 ) {
+ m.reset();
+
+ if ( !dbMsgPort.recv(m) ) {
+ log() << "end connection " << dbMsgPort.farEnd.toString() << endl;
+ dbMsgPort.shutdown();
+ break;
+ }
+
+ DbResponse dbresponse;
+ if ( !assembleResponse( m, dbresponse ) ) {
+ cout << curTimeMillis() % 10000 << " end msg " << dbMsgPort.farEnd.toString() << endl;
+ if ( dbMsgPort.farEnd.isLocalHost() ) {
+ dbMsgPort.shutdown();
+ sleepmillis(50);
+ problem() << "exiting end msg" << endl;
+ exit(EXIT_SUCCESS);
+ }
+ else {
+ cout << " (not from localhost, ignoring end msg)" << endl;
+ }
+ }
+
+ if ( dbresponse.response )
+ dbMsgPort.reply(m, *dbresponse.response, dbresponse.responseTo);
+ }
+
+ }
+ catch ( AssertionException& ) {
+ problem() << "Uncaught AssertionException, terminating" << endl;
+ exit(15);
+ }
}
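
OurListener::accepted() and connThread() above hand a freshly accepted connection from the listener thread to a dedicated worker: the listener parks the MessagingPort pointer in the global grab, spawns the thread, and spins in 1ms sleeps until the worker has claimed it. A minimal self-contained sketch of that hand-off, with toy types in place of MessagingPort:

    // Sketch of the accept/hand-off pattern -- simplified stand-ins, not the real code.
    #include <atomic>
    #include <chrono>
    #include <iostream>
    #include <thread>

    struct FakeConn { int id; };

    std::atomic<FakeConn*> grabSlot{nullptr};

    void connWorker() {
        FakeConn* conn = grabSlot.exchange(nullptr);   // claim the connection, clear the slot
        std::cout << "worker owns connection " << conn->id << std::endl;
        delete conn;
    }

    void accepted(FakeConn* conn) {
        grabSlot.store(conn);
        std::thread(connWorker).detach();
        while (grabSlot.load() != nullptr)             // busy-wait, like the sleepmillis(1) loop
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }

    int main() {
        for (int i = 0; i < 3; i++)
            accepted(new FakeConn{i});
        std::this_thread::sleep_for(std::chrono::milliseconds(50));  // let workers finish printing
    }
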
@@ -192,37 +192,37 @@ void msg(const char *m, const char *address, int port, int extras = 0) {
// SockAddr db("10.0.21.60", MessagingPort::DBPort);
// SockAddr db("172.16.0.179", MessagingPort::DBPort);
- MessagingPort p;
- if( !p.connect(db) )
- return;
-
- const int Loops = 1;
- for( int q = 0; q < Loops; q++ ) {
- Message send;
- Message response;
-
- send.setData( dbMsg , m);
- int len = send.data->dataLen();
-
- for( int i = 0; i < extras; i++ )
- p.say(/*db, */send);
-
- Timer t;
- bool ok = p.call(send, response);
- double tm = t.micros() + 1;
- cout << " ****ok. response.data:" << ok << " time:" << tm / 1000.0 << "ms " <<
- ((double) len) * 8 / 1000000 / (tm/1000000) << "Mbps" << endl;
- if( q+1 < Loops ) {
- cout << "\t\tSLEEP 8 then sending again as a test" << endl;
- sleepsecs(8);
- }
- }
- sleepsecs(1);
-
- p.shutdown();
+ MessagingPort p;
+ if ( !p.connect(db) )
+ return;
+
+ const int Loops = 1;
+ for ( int q = 0; q < Loops; q++ ) {
+ Message send;
+ Message response;
+
+ send.setData( dbMsg , m);
+ int len = send.data->dataLen();
+
+ for ( int i = 0; i < extras; i++ )
+ p.say(/*db, */send);
+
+ Timer t;
+ bool ok = p.call(send, response);
+ double tm = t.micros() + 1;
+ cout << " ****ok. response.data:" << ok << " time:" << tm / 1000.0 << "ms " <<
+ ((double) len) * 8 / 1000000 / (tm/1000000) << "Mbps" << endl;
+ if ( q+1 < Loops ) {
+ cout << "\t\tSLEEP 8 then sending again as a test" << endl;
+ sleepsecs(8);
+ }
+ }
+ sleepsecs(1);
+
+ p.shutdown();
}
-void msg(const char *m, int extras = 0) {
+void msg(const char *m, int extras = 0) {
msg(m, "127.0.0.1", DBPort, extras);
}
@@ -231,40 +231,40 @@ void msg(const char *m, int extras = 0) {
#include <signal.h>
void pipeSigHandler( int signal ) {
- psignal( signal, "Signal Received : ");
+ psignal( signal, "Signal Received : ");
}
int segvs = 0;
void segvhandler(int x) {
- if( ++segvs > 1 ) {
- signal(x, SIG_DFL);
- if( segvs == 2 ) {
- cout << "\n\n\n got 2nd SIGSEGV" << endl;
- sayDbContext();
- }
- return;
- }
- problem() << "got SIGSEGV " << x << ", terminating :-(" << endl;
- sayDbContext();
+ if ( ++segvs > 1 ) {
+ signal(x, SIG_DFL);
+ if ( segvs == 2 ) {
+ cout << "\n\n\n got 2nd SIGSEGV" << endl;
+ sayDbContext();
+ }
+ return;
+ }
+ problem() << "got SIGSEGV " << x << ", terminating :-(" << endl;
+ sayDbContext();
// closeAllSockets();
// MemoryMappedFile::closeAllFiles();
// flushOpLog();
- dbexit(14);
+ dbexit(14);
}
-void mysighandler(int x) {
- signal(x, SIG_IGN);
- log() << "got kill or ctrl c signal " << x << ", will terminate after current cmd ends" << endl;
- {
- dblock lk;
- problem() << " now exiting" << endl;
- exit(12);
- }
+void mysighandler(int x) {
+ signal(x, SIG_IGN);
+ log() << "got kill or ctrl c signal " << x << ", will terminate after current cmd ends" << endl;
+ {
+ dblock lk;
+ problem() << " now exiting" << endl;
+ exit(12);
+ }
}
void setupSignals() {
- assert( signal(SIGINT, mysighandler) != SIG_ERR );
- assert( signal(SIGTERM, mysighandler) != SIG_ERR );
+ assert( signal(SIGINT, mysighandler) != SIG_ERR );
+ assert( signal(SIGTERM, mysighandler) != SIG_ERR );
}
#else
@@ -273,59 +273,59 @@ void setupSignals() {}
void repairDatabases() {
- dblock lk;
- boost::filesystem::path path( dbpath );
- for( boost::filesystem::directory_iterator i( path );
- i != boost::filesystem::directory_iterator(); ++i ) {
- string fileName = i->leaf();
- if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" ) {
- string dbName = fileName.substr( 0, fileName.length() - 3 );
- assert( !setClientTempNs( dbName.c_str() ) );
- PhysicalDataFile *p = database->getFile( 0 );
- PDFHeader *h = p->getHeader();
- if ( !h->currentVersion() ) {
- // QUESTION: Repair even if file format is higher version than code?
- log() << "repairing database " << dbName << " with pdfile version " << h->version << "." << h->versionMinor << ", ";
- log() << "new version: " << VERSION << "." << VERSION_MINOR << endl;
- repairDatabase( dbName.c_str() );
- } else {
- closeClient( dbName.c_str() );
- }
- }
- }
+ dblock lk;
+ boost::filesystem::path path( dbpath );
+ for ( boost::filesystem::directory_iterator i( path );
+ i != boost::filesystem::directory_iterator(); ++i ) {
+ string fileName = i->leaf();
+ if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" ) {
+ string dbName = fileName.substr( 0, fileName.length() - 3 );
+ assert( !setClientTempNs( dbName.c_str() ) );
+ PhysicalDataFile *p = database->getFile( 0 );
+ PDFHeader *h = p->getHeader();
+ if ( !h->currentVersion() ) {
+ // QUESTION: Repair even if file format is higher version than code?
+ log() << "repairing database " << dbName << " with pdfile version " << h->version << "." << h->versionMinor << ", ";
+ log() << "new version: " << VERSION << "." << VERSION_MINOR << endl;
+ repairDatabase( dbName.c_str() );
+ } else {
+ closeClient( dbName.c_str() );
+ }
+ }
+ }
}
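
repairDatabases() above scans dbpath for files ending in .ns, treats each one as a database, and repairs it if the on-disk pdfile version is out of date. A rough standalone sketch of just the directory scan; the version check and repair are stubbed, and /data/db is a hypothetical path:

    // Sketch of the ".ns" startup scan -- filesystem walk only, repair is stubbed out.
    #include <filesystem>
    #include <iostream>
    #include <string>
    #include <system_error>

    namespace fs = std::filesystem;

    int main() {
        std::string dbpath = "/data/db";     // hypothetical data directory
        std::error_code ec;
        for (const auto& entry : fs::directory_iterator(dbpath, ec)) {
            std::string fileName = entry.path().filename().string();
            if (fileName.length() > 3 && fileName.substr(fileName.length() - 3) == ".ns") {
                std::string dbName = fileName.substr(0, fileName.length() - 3);
                bool currentVersion = true;  // stand-in for the pdfile header's version check
                if (!currentVersion)
                    std::cout << "would repair " << dbName << std::endl;
                else
                    std::cout << dbName << " is current" << std::endl;
            }
        }
        if (ec)
            std::cout << "could not read " << dbpath << ": " << ec.message() << std::endl;
    }
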
-void initAndListen(int listenPort, const char *appserverLoc = null) {
- if( opLogging )
- log() << "opLogging = " << opLogging << endl;
- _oplog.init();
-
+void initAndListen(int listenPort, const char *appserverLoc = null) {
+ if ( opLogging )
+ log() << "opLogging = " << opLogging << endl;
+ _oplog.init();
+
#if !defined(_WIN32)
- assert( signal(SIGSEGV, segvhandler) != SIG_ERR );
+ assert( signal(SIGSEGV, segvhandler) != SIG_ERR );
#endif
#if !defined(_WIN32)
pid_t pid = 0;
pid = getpid();
#else
- int pid=0;
+ int pid=0;
#endif
-
- log() << "Mongo DB : starting : pid = " << pid << " port = " << port << " dbpath = " << dbpath
- << " master = " << master << " slave = " << slave << endl;
+
+ log() << "Mongo DB : starting : pid = " << pid << " port = " << port << " dbpath = " << dbpath
+ << " master = " << master << " slave = " << slave << endl;
#if !defined(NOJNI)
- if( useJNI ) {
- JavaJS = new JavaJSImpl(appserverLoc);
- javajstest();
+ if ( useJNI ) {
+ JavaJS = new JavaJSImpl(appserverLoc);
+ javajstest();
}
#endif
- setupSignals();
-
- repairDatabases();
+ setupSignals();
+
+ repairDatabases();
- listen(listenPort);
+ listen(listenPort);
}
//ofstream problems("dbproblems.log", ios_base::app | ios_base::out);
@@ -337,139 +337,139 @@ int main(int argc, char* argv[], char *envp[] )
{
unsigned x = 0x12345678;
unsigned char& b = (unsigned char&) x;
- if( b != 0x78 ) {
+ if ( b != 0x78 ) {
cout << "big endian cpus not yet supported" << endl;
return 33;
}
}
- DEV cout << "warning: DEV mode enabled\n";
+ DEV cout << "warning: DEV mode enabled\n";
#if !defined(_WIN32)
signal(SIGPIPE, pipeSigHandler);
#endif
- srand(curTimeMillis());
+ srand(curTimeMillis());
- UnitTest::runTests();
+ UnitTest::runTests();
- if( argc >= 2 ) {
- if( strcmp(argv[1], "quicktest") == 0 ) {
- quicktest();
- return 0;
- }
- if( strcmp(argv[1], "javatest") == 0 ) {
+ if ( argc >= 2 ) {
+ if ( strcmp(argv[1], "quicktest") == 0 ) {
+ quicktest();
+ return 0;
+ }
+ if ( strcmp(argv[1], "javatest") == 0 ) {
#if !defined(NOJNI)
JavaJS = new JavaJSImpl();
javajstest();
#else
cout << "NOJNI build cannot test" << endl;
#endif
- return 0;
- }
- if( strcmp(argv[1], "test2") == 0 ) {
- return test2();
- }
- if( strcmp(argv[1], "msg") == 0 ) {
-
- // msg(argc >= 3 ? argv[2] : "ping");
-
- const char *m = "ping";
- int thePort = DBPort;
-
- if (argc >= 3) {
- m = argv[2];
-
- if (argc > 3) {
- thePort = atoi(argv[3]);
- }
- }
-
- msg(m, "127.0.0.1", thePort);
-
- return 0;
- }
- if( strcmp(argv[1], "msglots") == 0 ) {
- msg(argc >= 3 ? argv[2] : "ping", 1000);
- return 0;
- }
- if( strcmp( argv[1], "testclient") == 0 ) {
- testClient();
- return 0;
- }
- if( strcmp(argv[1], "zzz") == 0 ) {
- msg(argc >= 3 ? argv[2] : "ping", 1000);
- return 0;
- }
- if( strcmp(argv[1], "run") == 0 ) {
- initAndListen(port);
- return 0;
- }
- if( strcmp(argv[1], "longmsg") == 0 ) {
- char buf[800000];
- memset(buf, 'a', 799999);
- buf[799999] = 0;
- buf[799998] = 'b';
- buf[0] = 'c';
- msg(buf);
- return 0;
- }
+ return 0;
+ }
+ if ( strcmp(argv[1], "test2") == 0 ) {
+ return test2();
+ }
+ if ( strcmp(argv[1], "msg") == 0 ) {
+
+ // msg(argc >= 3 ? argv[2] : "ping");
+
+ const char *m = "ping";
+ int thePort = DBPort;
+
+ if (argc >= 3) {
+ m = argv[2];
+
+ if (argc > 3) {
+ thePort = atoi(argv[3]);
+ }
+ }
+
+ msg(m, "127.0.0.1", thePort);
+
+ return 0;
+ }
+ if ( strcmp(argv[1], "msglots") == 0 ) {
+ msg(argc >= 3 ? argv[2] : "ping", 1000);
+ return 0;
+ }
+ if ( strcmp( argv[1], "testclient") == 0 ) {
+ testClient();
+ return 0;
+ }
+ if ( strcmp(argv[1], "zzz") == 0 ) {
+ msg(argc >= 3 ? argv[2] : "ping", 1000);
+ return 0;
+ }
+ if ( strcmp(argv[1], "run") == 0 ) {
+ initAndListen(port);
+ return 0;
+ }
+ if ( strcmp(argv[1], "longmsg") == 0 ) {
+ char buf[800000];
+ memset(buf, 'a', 799999);
+ buf[799999] = 0;
+ buf[799998] = 'b';
+ buf[0] = 'c';
+ msg(buf);
+ return 0;
+ }
/*
- * *** POST STANDARD SWITCH METHOD - if we don't satisfy, we switch to a
+ * *** POST STANDARD SWITCH METHOD - if we don't satisfy, we switch to a
* slightly different mode where "run" is assumed and we can set values
*/
-
+
char *appsrvPath = null;
-
+
for (int i = 1; i < argc; i++) {
-
- if( argv[i] == 0 ) continue;
- string s = argv[i];
- if( s == "--port" )
+ if ( argv[i] == 0 ) continue;
+ string s = argv[i];
+
+ if ( s == "--port" )
port = atoi(argv[++i]);
- else if( s == "--nojni" )
- useJNI = false;
- else if( s == "--master" )
- master = true;
- else if( s == "--slave" )
- slave = true;
- else if( s == "--help" || s == "-?" || s == "--?" )
+ else if ( s == "--nojni" )
+ useJNI = false;
+ else if ( s == "--master" )
+ master = true;
+ else if ( s == "--slave" )
+ slave = true;
+ else if ( s == "--help" || s == "-?" || s == "--?" )
goto usage;
- else if( s == "--quiet" )
+ else if ( s == "--quiet" )
quiet = true;
- else if( s == "--quota" )
+ else if ( s == "--quota" )
quota = true;
- else if( s == "--objcheck" )
+ else if ( s == "--objcheck" )
objcheck = true;
- else if( s == "--source" ) {
+ else if ( s == "--source" ) {
/* specifies what the source in local.sources should be */
dashDashSource = argv[++i];
- }
- else if( s == "--pairwith" ) {
- pairWith( argv[i+1], argv[i+2] );
+ }
+ else if ( s == "--pairwith" ) {
+ pairWith( argv[i+1], argv[i+2] );
i += 2;
- }
- else if( s == "--dbpath" )
- dbpath = argv[++i];
- else if( s == "--appsrvpath" )
+ }
+ else if ( s == "--dbpath" )
+ dbpath = argv[++i];
+ else if ( s == "--appsrvpath" )
appsrvPath = argv[++i];
- else if( s == "--nocursors" )
- useCursors = false;
- else if( strncmp(s.c_str(), "--oplog", 7) == 0 ) {
- int x = s[7] - '0';
- if( x < 0 || x > 7 ) {
- cout << "can't interpret --oplog setting" << endl;
- exit(13);
- }
- opLogging = x;
- }
+ else if ( s == "--nocursors" )
+ useCursors = false;
+ else if ( strncmp(s.c_str(), "--oplog", 7) == 0 ) {
+ int x = s[7] - '0';
+ if ( x < 0 || x > 7 ) {
+ cout << "can't interpret --oplog setting" << endl;
+ exit(13);
+ }
+ opLogging = x;
+ }
}
-
+
initAndListen(port, appsrvPath);
-
- exit(0);
- }
+
+ exit(0);
+ }
usage:
cout << "Mongo db ";
@@ -501,6 +501,6 @@ usage:
cout << " --source <server:port>" << endl;
cout << " --pairwith <server:port> <arbiter>" << endl;
cout << endl;
-
+
return 0;
}
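
Among the switches parsed in main() above, --oplogN reads its level as the single character right after the literal "--oplog" and rejects anything outside 0..7. A small standalone sketch of that parsing:

    // Sketch of the --oplogN parsing: the digit sits at position 7 ("--oplog" is 7 chars).
    #include <cstring>
    #include <iostream>

    int parseOplogLevel(const char* s) {
        if (strncmp(s, "--oplog", 7) != 0)
            return -1;                 // not an --oplog switch at all
        int x = s[7] - '0';            // character right after "--oplog"
        if (x < 0 || x > 7)
            return -1;                 // out of range; the real code prints an error and exits(13)
        return x;
    }

    int main() {
        std::cout << parseOplogLevel("--oplog3") << std::endl;  // 3
        std::cout << parseOplogLevel("--oplog9") << std::endl;  // -1
    }
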
diff --git a/db/db.h b/db/db.h
index 6a85d2f1323..667187a5cda 100644
--- a/db/db.h
+++ b/db/db.h
@@ -1,15 +1,15 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -21,27 +21,30 @@
void jniCallback(Message& m, Message& out);
-class MutexInfo {
+class MutexInfo {
unsigned long long start, enter, timeLocked; // all in microseconds
int locked;
public:
- MutexInfo() : locked(0) {
+ MutexInfo() : locked(0) {
start = curTimeMicros64();
}
- void entered() {
+ void entered() {
enter = curTimeMicros64();
locked++;
assert( locked == 1 );
}
- void leaving() {
+ void leaving() {
locked--;
assert( locked == 0 );
timeLocked += curTimeMicros64() - enter;
}
- int isLocked() const { return locked; }
- void timingInfo(unsigned long long &s, unsigned long long &tl) {
- s = start; tl = timeLocked;
+ int isLocked() const {
+ return locked;
+ }
+ void timingInfo(unsigned long long &s, unsigned long long &tl) {
+ s = start;
+ tl = timeLocked;
}
};
@@ -50,38 +53,38 @@ extern MutexInfo dbMutexInfo;
//extern int dbLocked;
struct lock {
- boostlock bl_;
- MutexInfo& info_;
- lock( boost::mutex &mutex, MutexInfo &info ) :
- bl_( mutex ),
- info_( info ) {
- info_.entered();
- }
- ~lock() {
- info_.leaving();
- }
+ boostlock bl_;
+ MutexInfo& info_;
+ lock( boost::mutex &mutex, MutexInfo &info ) :
+ bl_( mutex ),
+ info_( info ) {
+ info_.entered();
+ }
+ ~lock() {
+ info_.leaving();
+ }
};
struct dblock : public lock {
- dblock() :
- lock( dbMutex, dbMutexInfo ) {
- }
+ dblock() :
+ lock( dbMutex, dbMutexInfo ) {
+ }
};
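
MutexInfo together with the lock/dblock structs above forms a scoped database lock that also tracks how long the mutex has been held. A self-contained sketch of the same idea using std::mutex, with simplified stand-ins rather than the real dblock:

    #include <cassert>
    #include <chrono>
    #include <iostream>
    #include <mutex>

    struct MutexInfoModel {
        long long timeLockedUs = 0;
        int locked = 0;
        std::chrono::steady_clock::time_point enter;

        void entered() {
            enter = std::chrono::steady_clock::now();
            locked++;
            assert( locked == 1 );              // the db mutex is not recursive
        }
        void leaving() {
            locked--;
            assert( locked == 0 );
            timeLockedUs += std::chrono::duration_cast<std::chrono::microseconds>(
                                std::chrono::steady_clock::now() - enter).count();
        }
    };

    std::mutex gMutex;
    MutexInfoModel gInfo;

    struct ScopedDbLock {
        std::lock_guard<std::mutex> guard;      // plays the role of boostlock bl_
        ScopedDbLock() : guard(gMutex) { gInfo.entered(); }
        ~ScopedDbLock() { gInfo.leaving(); }    // runs before guard releases, as in ~lock()
    };

    int main() {
        {
            ScopedDbLock lk;                    // acquire + start timing
            // ... work done under the global db lock ...
        }                                       // stop timing + release
        std::cout << "held for ~" << gInfo.timeLockedUs << "us" << std::endl;
    }
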
#include "boost/version.hpp"
-/* a scoped release of a mutex temporarily -- like a scopedlock but reversed.
+/* a scoped release of a mutex temporarily -- like a scopedlock but reversed.
*/
struct temprelease {
boost::mutex& m;
- temprelease(boost::mutex& _m) : m(_m) {
+ temprelease(boost::mutex& _m) : m(_m) {
#if BOOST_VERSION >= 103500
m.unlock();
#else
boost::detail::thread::lock_ops<boost::mutex>::unlock(m);
#endif
}
- ~temprelease() {
+ ~temprelease() {
#if BOOST_VERSION >= 103500
m.lock();
#else
@@ -99,44 +102,44 @@ extern const char *curNs;
extern bool master;
inline string getKey( const char *ns, const char *path ) {
- char cl[256];
- nsToClient(ns, cl);
- return string( cl ) + ":" + path;
+ char cl[256];
+ nsToClient(ns, cl);
+ return string( cl ) + ":" + path;
}
/* returns true if the database ("database") did not exist, and it was created on this call */
-inline bool setClient(const char *ns, const char *path=dbpath) {
- /* we must be in critical section at this point as these are global
- variables.
+inline bool setClient(const char *ns, const char *path=dbpath) {
+ /* we must be in critical section at this point as these are global
+ variables.
*/
assert( dbMutexInfo.isLocked() );
- curNs = ns;
- string key = getKey( ns, path );
- map<string,Database*>::iterator it = databases.find(key);
- if( it != databases.end() ) {
- database = it->second;
- return false;
- }
-
- // when master for replication, we advertise all the db's, and that
- // looks like a 'first operation'. so that breaks this log message's
+ curNs = ns;
+ string key = getKey( ns, path );
+ map<string,Database*>::iterator it = databases.find(key);
+ if ( it != databases.end() ) {
+ database = it->second;
+ return false;
+ }
+
+ // when master for replication, we advertise all the db's, and that
+ // looks like a 'first operation'. so that breaks this log message's
// meaningfulness. instead of fixing (which would be better), we just
// stop showing for now.
- // 2008-12-22 We now open every database on startup, so this log is
- // no longer helpful. Commenting.
+ // 2008-12-22 We now open every database on startup, so this log is
+ // no longer helpful. Commenting.
// if( !master )
// log() << "first operation for database " << key << endl;
- char cl[256];
- nsToClient(ns, cl);
- bool justCreated;
- Database *c = new Database(cl, justCreated, path);
- databases[key] = c;
- database = c;
+ char cl[256];
+ nsToClient(ns, cl);
+ bool justCreated;
+ Database *c = new Database(cl, justCreated, path);
+ databases[key] = c;
+ database = c;
database->finishInit();
-
- return justCreated;
+
+ return justCreated;
}
// shared functionality for removing references to a database from this program instance
@@ -144,28 +147,28 @@ inline bool setClient(const char *ns, const char *path=dbpath) {
void closeClient( const char *cl, const char *path = dbpath );
inline void eraseDatabase( const char *ns, const char *path=dbpath ) {
- string key = getKey( ns, path );
- databases.erase( key );
+ string key = getKey( ns, path );
+ databases.erase( key );
}
-/* We normally keep around a curNs ptr -- if this ns is temporary,
+/* We normally keep around a curNs ptr -- if this ns is temporary,
   use this instead so we don't have a bad ptr. We could have made a copy,
   but we are trying to be fast, as we call setClient for every single operation.
*/
-inline bool setClientTempNs(const char *ns) {
- bool jc = setClient(ns);
- curNs = "";
- return jc;
+inline bool setClientTempNs(const char *ns) {
+ bool jc = setClient(ns);
+ curNs = "";
+ return jc;
}
struct dbtemprelease {
string clientname;
- string clientpath;
+ string clientpath;
dbtemprelease() {
- if( database ) {
+ if ( database ) {
clientname = database->name;
- clientpath = database->path;
- }
+ clientpath = database->path;
+ }
dbMutexInfo.leaving();
#if BOOST_VERSION >= 103500
dbMutex.unlock();
@@ -173,14 +176,14 @@ struct dbtemprelease {
boost::detail::thread::lock_ops<boost::mutex>::unlock(dbMutex);
#endif
}
- ~dbtemprelease() {
+ ~dbtemprelease() {
#if BOOST_VERSION >= 103500
dbMutex.lock();
#else
boost::detail::thread::lock_ops<boost::mutex>::lock(dbMutex);
#endif
dbMutexInfo.entered();
- if( clientname.empty() )
+ if ( clientname.empty() )
database = 0;
else
setClient(clientname.c_str(), clientpath.c_str());
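
The db.h hunks above show setClient() keeping a map of Database objects keyed by "<client>:<path>" and creating one lazily on the first miss. A toy model of that cache-or-create path follows; it takes the client name directly instead of deriving it from a namespace with nsToClient:

    #include <iostream>
    #include <map>
    #include <string>

    struct DatabaseModel {
        std::string name;
        bool justCreated;
    };

    std::map<std::string, DatabaseModel*> gDatabases;
    DatabaseModel* gCurrent = nullptr;

    // returns true if the database did not exist and was created by this call
    bool setClientModel(const std::string& client, const std::string& path) {
        std::string key = client + ":" + path;
        auto it = gDatabases.find(key);
        if (it != gDatabases.end()) {
            gCurrent = it->second;      // cache hit: just switch the current pointer
            return false;
        }
        DatabaseModel* db = new DatabaseModel{client, true};  // cache miss: create and remember
        gDatabases[key] = db;
        gCurrent = db;
        return true;
    }

    int main() {
        std::cout << setClientModel("alleyinsider", "/data/db") << std::endl;  // 1: created
        std::cout << setClientModel("alleyinsider", "/data/db") << std::endl;  // 0: cached
    }
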
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 3707a116c09..41e73e073f3 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -1,16 +1,16 @@
-// dbcommands.cpp
+// dbcommands.cpp
/**
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -37,188 +37,205 @@ void flushOpLog();
int runCount(const char *ns, BSONObj& cmd, string& err);
void clean(const char *ns, NamespaceDetails *d) {
- for( int i = 0; i < Buckets; i++ )
- d->deletedList[i].Null();
+ for ( int i = 0; i < Buckets; i++ )
+ d->deletedList[i].Null();
}
string validateNS(const char *ns, NamespaceDetails *d) {
- bool valid = true;
- stringstream ss;
- ss << "\nvalidate\n";
- ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
- if( d->capped )
- ss << " capped:" << d->capped << " max:" << d->max << '\n';
-
- ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->ns.buf << '\n';
- ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->ns.buf << '\n';
- try {
- d->firstExtent.ext()->assertOk();
- d->lastExtent.ext()->assertOk();
- } catch(...) { valid=false; ss << " extent asserted "; }
-
- ss << " datasize?:" << d->datasize << " nrecords?:" << d->nrecords << " lastExtentSize:" << d->lastExtentSize << '\n';
- ss << " padding:" << d->paddingFactor << '\n';
- try {
-
- try {
- ss << " first extent:\n";
- d->firstExtent.ext()->dump(ss);
- valid = valid && d->firstExtent.ext()->validates();
- }
- catch(...) {
- ss << "\n exception firstextent\n" << endl;
- }
-
- auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
- int n = 0;
- long long len = 0;
- long long nlen = 0;
- set<DiskLoc> recs;
- int outOfOrder = 0;
- DiskLoc cl_last;
- while( c->ok() ) {
- n++;
-
- DiskLoc cl = c->currLoc();
- if( n < 1000000 )
- recs.insert(cl);
- if( d->capped ) {
- if( cl < cl_last )
- outOfOrder++;
- cl_last = cl;
- }
-
- Record *r = c->_current();
- len += r->lengthWithHeaders;
- nlen += r->netLength();
- c->advance();
- }
- if( d->capped ) {
- ss << " capped outOfOrder:" << outOfOrder;
- if( outOfOrder > 1 ) {
- valid = false;
- ss << " ???";
- }
- else ss << " (OK)";
- ss << '\n';
- }
- ss << " " << n << " objects found, nobj:" << d->nrecords << "\n";
- ss << " " << len << " bytes data w/headers\n";
- ss << " " << nlen << " bytes data wout/headers\n";
-
- ss << " deletedList: ";
- for( int i = 0; i < Buckets; i++ ) {
- ss << (d->deletedList[i].isNull() ? '0' : '1');
- }
- ss << endl;
- int ndel = 0;
- long long delSize = 0;
- int incorrect = 0;
- for( int i = 0; i < Buckets; i++ ) {
- DiskLoc loc = d->deletedList[i];
- try {
- int k = 0;
- while( !loc.isNull() ) {
- if( recs.count(loc) )
- incorrect++;
- ndel++;
-
- if( loc.questionable() ) {
- if( loc.a() <= 0 || strstr(ns, "hudsonSmall") == 0 ) {
- ss << " ?bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k << endl;
- valid = false;
- break;
- }
- }
-
- DeletedRecord *d = loc.drec();
- delSize += d->lengthWithHeaders;
- loc = d->nextDeleted;
- k++;
- }
- } catch(...) { ss <<" ?exception in deleted chain for bucket " << i << endl; valid = false; }
- }
- ss << " deleted: n: " << ndel << " size: " << delSize << '\n';
- if( incorrect ) {
- ss << " ?corrupt: " << incorrect << " records from datafile are in deleted list\n";
- valid = false;
- }
-
- int idxn = 0;
- try {
- ss << " nIndexes:" << d->nIndexes << endl;
- for( ; idxn < d->nIndexes; idxn++ ) {
- ss << " " << d->indexes[idxn].indexNamespace() << " keys:" <<
- d->indexes[idxn].head.btree()->fullValidate(d->indexes[idxn].head) << endl;
- }
- }
- catch(...) {
- ss << "\n exception during index validate idxn:" << idxn << endl; valid=false;
- }
-
- }
- catch(AssertionException) {
- ss << "\n exception during validate\n" << endl;
- valid = false;
- }
-
- if( !valid )
- ss << " ns corrupt, requires dbchk\n";
-
- return ss.str();
+ bool valid = true;
+ stringstream ss;
+ ss << "\nvalidate\n";
+ ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
+ if ( d->capped )
+ ss << " capped:" << d->capped << " max:" << d->max << '\n';
+
+ ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->ns.buf << '\n';
+ ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->ns.buf << '\n';
+ try {
+ d->firstExtent.ext()->assertOk();
+ d->lastExtent.ext()->assertOk();
+ } catch (...) {
+ valid=false;
+ ss << " extent asserted ";
+ }
+
+ ss << " datasize?:" << d->datasize << " nrecords?:" << d->nrecords << " lastExtentSize:" << d->lastExtentSize << '\n';
+ ss << " padding:" << d->paddingFactor << '\n';
+ try {
+
+ try {
+ ss << " first extent:\n";
+ d->firstExtent.ext()->dump(ss);
+ valid = valid && d->firstExtent.ext()->validates();
+ }
+ catch (...) {
+ ss << "\n exception firstextent\n" << endl;
+ }
+
+ auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
+ int n = 0;
+ long long len = 0;
+ long long nlen = 0;
+ set<DiskLoc> recs;
+ int outOfOrder = 0;
+ DiskLoc cl_last;
+ while ( c->ok() ) {
+ n++;
+
+ DiskLoc cl = c->currLoc();
+ if ( n < 1000000 )
+ recs.insert(cl);
+ if ( d->capped ) {
+ if ( cl < cl_last )
+ outOfOrder++;
+ cl_last = cl;
+ }
+
+ Record *r = c->_current();
+ len += r->lengthWithHeaders;
+ nlen += r->netLength();
+ c->advance();
+ }
+ if ( d->capped ) {
+ ss << " capped outOfOrder:" << outOfOrder;
+ if ( outOfOrder > 1 ) {
+ valid = false;
+ ss << " ???";
+ }
+ else ss << " (OK)";
+ ss << '\n';
+ }
+ ss << " " << n << " objects found, nobj:" << d->nrecords << "\n";
+ ss << " " << len << " bytes data w/headers\n";
+ ss << " " << nlen << " bytes data wout/headers\n";
+
+ ss << " deletedList: ";
+ for ( int i = 0; i < Buckets; i++ ) {
+ ss << (d->deletedList[i].isNull() ? '0' : '1');
+ }
+ ss << endl;
+ int ndel = 0;
+ long long delSize = 0;
+ int incorrect = 0;
+ for ( int i = 0; i < Buckets; i++ ) {
+ DiskLoc loc = d->deletedList[i];
+ try {
+ int k = 0;
+ while ( !loc.isNull() ) {
+ if ( recs.count(loc) )
+ incorrect++;
+ ndel++;
+
+ if ( loc.questionable() ) {
+ if ( loc.a() <= 0 || strstr(ns, "hudsonSmall") == 0 ) {
+ ss << " ?bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k << endl;
+ valid = false;
+ break;
+ }
+ }
+
+ DeletedRecord *d = loc.drec();
+ delSize += d->lengthWithHeaders;
+ loc = d->nextDeleted;
+ k++;
+ }
+ } catch (...) {
+ ss <<" ?exception in deleted chain for bucket " << i << endl;
+ valid = false;
+ }
+ }
+ ss << " deleted: n: " << ndel << " size: " << delSize << '\n';
+ if ( incorrect ) {
+ ss << " ?corrupt: " << incorrect << " records from datafile are in deleted list\n";
+ valid = false;
+ }
+
+ int idxn = 0;
+ try {
+ ss << " nIndexes:" << d->nIndexes << endl;
+ for ( ; idxn < d->nIndexes; idxn++ ) {
+ ss << " " << d->indexes[idxn].indexNamespace() << " keys:" <<
+ d->indexes[idxn].head.btree()->fullValidate(d->indexes[idxn].head) << endl;
+ }
+ }
+ catch (...) {
+ ss << "\n exception during index validate idxn:" << idxn << endl;
+ valid=false;
+ }
+
+ }
+ catch (AssertionException) {
+ ss << "\n exception during validate\n" << endl;
+ valid = false;
+ }
+
+ if ( !valid )
+ ss << " ns corrupt, requires dbchk\n";
+
+ return ss.str();
}
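
Part of validateNS() above collects every record reached by the forward scan into a set and then flags any location that also appears while walking the deleted-list chains. A toy version of that cross-check with made-up offsets:

    #include <iostream>
    #include <set>
    #include <vector>

    int main() {
        std::set<long> liveRecords = {100, 220, 340, 512};                  // offsets seen by the scan
        std::vector<std::vector<long>> deletedBuckets = {{64}, {220}, {}};  // deleted chains per bucket

        int ndel = 0, incorrect = 0;
        for (const auto& bucket : deletedBuckets) {
            for (long loc : bucket) {
                ndel++;
                if (liveRecords.count(loc))
                    incorrect++;        // same location is both live and deleted -> corruption
            }
        }
        std::cout << "deleted: " << ndel << " corrupt overlaps: " << incorrect << std::endl;
    }
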
-class CmdDropDatabase : public Command {
+class CmdDropDatabase : public Command {
public:
- virtual bool logTheOp() { return true; }
- virtual bool slaveOk() { return false; }
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
CmdDropDatabase() : Command("dropDatabase") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONElement e = cmdObj.findElement(name);
log() << "dropDatabase " << ns << endl;
int p = (int) e.number();
- if( p != 1 )
+ if ( p != 1 )
return false;
dropDatabase(ns);
return true;
}
} cmdDropDatabase;
-class CmdRepairDatabase : public Command {
+class CmdRepairDatabase : public Command {
public:
- virtual bool logTheOp() { return false; }
- virtual bool slaveOk() { return true; }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() {
+ return true;
+ }
CmdRepairDatabase() : Command("repairDatabase") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONElement e = cmdObj.findElement(name);
log() << "repairDatabase " << ns << endl;
int p = (int) e.number();
- if( p != 1 )
+ if ( p != 1 )
return false;
- e = cmdObj.findElement( "preserveClonedFilesOnFailure" );
- bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
- e = cmdObj.findElement( "backupOriginalFiles" );
- bool backupOriginalFiles = e.isBoolean() && e.boolean();
+ e = cmdObj.findElement( "preserveClonedFilesOnFailure" );
+ bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
+ e = cmdObj.findElement( "backupOriginalFiles" );
+ bool backupOriginalFiles = e.isBoolean() && e.boolean();
return repairDatabase( ns, preserveClonedFilesOnFailure, backupOriginalFiles );
}
} cmdRepairDatabase;
-/* set db profiling level
- todo: how do we handle profiling information put in the db with replication?
+/* set db profiling level
+ todo: how do we handle profiling information put in the db with replication?
sensibly or not?
*/
-class CmdProfile : public Command {
+class CmdProfile : public Command {
public:
- virtual bool slaveOk() { return true; }
+ virtual bool slaveOk() {
+ return true;
+ }
CmdProfile() : Command("profile") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONElement e = cmdObj.findElement(name);
result.append("was", (double) database->profile);
int p = (int) e.number();
bool ok = false;
- if( p == -1 )
+ if ( p == -1 )
ok = true;
- else if( p >= 0 && p <= 2 ) {
+ else if ( p >= 0 && p <= 2 ) {
ok = true;
database->profile = p;
}
@@ -226,9 +243,11 @@ public:
}
} cmdProfile;
-class CmdTimeInfo : public Command {
+class CmdTimeInfo : public Command {
public:
- virtual bool slaveOk() { return true; }
+ virtual bool slaveOk() {
+ return true;
+ }
CmdTimeInfo() : Command("timeinfo") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
unsigned long long last, start, timeLocked;
@@ -244,10 +263,12 @@ public:
} cmdTimeInfo;
/* just to check if the db has asserted */
-class CmdAssertInfo : public Command {
+class CmdAssertInfo : public Command {
public:
- virtual bool slaveOk() { return true; }
- CmdAssertInfo() : Command("assertinfo") {}
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdAssertInfo() : Command("assertinfo") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
result.appendBool("dbasserted", lastAssert[0].isSet() || lastAssert[1].isSet() || lastAssert[2].isSet());
result.appendBool("asserted", lastAssert[0].isSet() || lastAssert[1].isSet() || lastAssert[2].isSet() || lastAssert[3].isSet());
@@ -259,9 +280,11 @@ public:
}
} cmdAsserts;
-class CmdGetOpTime : public Command {
+class CmdGetOpTime : public Command {
public:
- virtual bool slaveOk() { return true; }
+ virtual bool slaveOk() {
+ return true;
+ }
CmdGetOpTime() : Command("getoptime") { }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
result.appendDate("optime", OpTime::now().asDate());
@@ -270,7 +293,7 @@ public:
} cmdgetoptime;
/*
-class Cmd : public Command {
+class Cmd : public Command {
public:
Cmd() : Command("") { }
bool adminOnly() { return true; }
@@ -280,11 +303,15 @@ public:
} cmd;
*/
-class CmdOpLogging : public Command {
+class CmdOpLogging : public Command {
public:
- virtual bool slaveOk() { return true; }
+ virtual bool slaveOk() {
+ return true;
+ }
CmdOpLogging() : Command("opLogging") { }
- bool adminOnly() { return true; }
+ bool adminOnly() {
+ return true;
+ }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
opLogging = (int) cmdObj.findElement(name).number();
flushOpLog();
@@ -294,21 +321,27 @@ public:
} cmdoplogging;
/* drop collection */
-class CmdDrop : public Command {
+class CmdDrop : public Command {
public:
CmdDrop() : Command("drop") { }
- virtual bool logTheOp() { return true; }
- virtual bool slaveOk() { return false; }
- virtual bool adminOnly() { return false; }
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
+ virtual bool adminOnly() {
+ return false;
+ }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string nsToDrop = database->name + '.' + cmdObj.findElement(name).valuestr();
NamespaceDetails *d = nsdetails(nsToDrop.c_str());
log() << "CMD: drop " << nsToDrop << endl;
- if( d == 0 ) {
+ if ( d == 0 ) {
errmsg = "ns not found";
return false;
}
- if( d->nIndexes != 0 ) {
+ if ( d->nIndexes != 0 ) {
// client helper function is supposed to drop the indexes first
errmsg = "ns has indexes (not permitted on drop)";
return false;
@@ -320,22 +353,30 @@ public:
}
} cmdDrop;
-class CmdQueryTraceLevel : public Command {
+class CmdQueryTraceLevel : public Command {
public:
- virtual bool slaveOk() { return true; }
+ virtual bool slaveOk() {
+ return true;
+ }
CmdQueryTraceLevel() : Command("queryTraceLevel") { }
- bool adminOnly() { return true; }
+ bool adminOnly() {
+ return true;
+ }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
queryTraceLevel = (int) cmdObj.findElement(name).number();
return true;
}
} cmdquerytracelevel;
-class CmdTraceAll : public Command {
+class CmdTraceAll : public Command {
public:
- virtual bool slaveOk() { return true; }
+ virtual bool slaveOk() {
+ return true;
+ }
CmdTraceAll() : Command("traceAll") { }
- bool adminOnly() { return true; }
+ bool adminOnly() {
+ return true;
+ }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
queryTraceLevel = otherTraceLevel = (int) cmdObj.findElement(name).number();
return true;
@@ -343,22 +384,28 @@ public:
} cmdtraceall;
/* select count(*) */
-class CmdCount : public Command {
+class CmdCount : public Command {
public:
CmdCount() : Command("count") { }
- virtual bool logTheOp() { return false; }
- virtual bool slaveOk() { return false; }
- virtual bool adminOnly() { return false; }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
+ virtual bool adminOnly() {
+ return false;
+ }
virtual bool run(const char *_ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string ns = database->name + '.' + cmdObj.findElement(name).valuestr();
string err;
int n = runCount(ns.c_str(), cmdObj, err);
int nn = n;
bool ok = true;
- if( n < 0 ) {
+ if ( n < 0 ) {
ok = false;
nn = 0;
- if( !err.empty() )
+ if ( !err.empty() )
errmsg = err;
}
result.append("n", (double) nn);
@@ -367,17 +414,23 @@ public:
} cmdCount;
/* create collection */
-class CmdCreate : public Command {
+class CmdCreate : public Command {
public:
CmdCreate() : Command("create") { }
- virtual bool logTheOp() { return true; }
- virtual bool slaveOk() { return false; }
- virtual bool adminOnly() { return false; }
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
+ virtual bool adminOnly() {
+ return false;
+ }
virtual bool run(const char *_ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string ns = database->name + '.' + cmdObj.findElement(name).valuestr();
string err;
bool ok = userCreateNS(ns.c_str(), cmdObj, err, true);
- if( !ok && !err.empty() )
+ if ( !ok && !err.empty() )
errmsg = err;
return ok;
}
@@ -385,8 +438,12 @@ public:
class CmdDeleteIndexes : public Command {
public:
- virtual bool logTheOp() { return true; }
- virtual bool slaveOk() { return false; }
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
CmdDeleteIndexes() : Command("deleteIndexes") { }
bool run(const char *ns, BSONObj& jsobj, string& errmsg, BSONObjBuilder& anObjBuilder, bool /*fromRepl*/) {
/* note: temp implementation. space not reclaimed! */
@@ -394,22 +451,22 @@ public:
string toDeleteNs = database->name + '.' + e.valuestr();
NamespaceDetails *d = nsdetails(toDeleteNs.c_str());
log() << "CMD: deleteIndexes " << toDeleteNs << endl;
- if( d ) {
+ if ( d ) {
BSONElement f = jsobj.findElement("index");
- if( !f.eoo() ) {
+ if ( !f.eoo() ) {
d->aboutToDeleteAnIndex();
ClientCursor::invalidate(toDeleteNs.c_str());
// delete a specific index or all?
- if( f.type() == String ) {
+ if ( f.type() == String ) {
const char *idxName = f.valuestr();
- if( *idxName == '*' && idxName[1] == 0 ) {
+ if ( *idxName == '*' && idxName[1] == 0 ) {
log() << " d->nIndexes was " << d->nIndexes << '\n';
anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
anObjBuilder.append("msg", "all indexes deleted for collection");
- for( int i = 0; i < d->nIndexes; i++ )
+ for ( int i = 0; i < d->nIndexes; i++ )
d->indexes[i].kill();
d->nIndexes = 0;
log() << " alpha implementation, space not reclaimed" << endl;
@@ -417,21 +474,21 @@ public:
else {
// delete just one index
int x = d->findIndexByName(idxName);
- if( x >= 0 ) {
+ if ( x >= 0 ) {
cout << " d->nIndexes was " << d->nIndexes << endl;
anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
- /* note it is important we remove the IndexDetails with this
+ /* note it is important we remove the IndexDetails with this
call, otherwise, on recreate, the old one would be reused, and its
IndexDetails::info ptr would be bad info.
*/
d->indexes[x].kill();
d->nIndexes--;
- for( int i = x; i < d->nIndexes; i++ )
+ for ( int i = x; i < d->nIndexes; i++ )
d->indexes[i] = d->indexes[i+1];
log() << "deleteIndexes: alpha implementation, space not reclaimed\n";
- } else {
+ } else {
log() << "deleteIndexes: " << idxName << " not found" << endl;
errmsg = "index not found";
return false;
@@ -444,7 +501,7 @@ public:
errmsg = "ns not found";
return false;
}
- return true;
+ return true;
}
} cmdDeleteIndexes;
@@ -457,15 +514,15 @@ extern map<string,Command*> *commands;
returns true if ran a cmd
*/
-bool _runCommands(const char *ns, BSONObj& _cmdobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl) {
- const char *p = strchr(ns, '.');
- if( !p ) return false;
- if( strcmp(p, ".$cmd") != 0 ) return false;
+bool _runCommands(const char *ns, BSONObj& _cmdobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl) {
+ const char *p = strchr(ns, '.');
+ if ( !p ) return false;
+ if ( strcmp(p, ".$cmd") != 0 ) return false;
BSONObj jsobj;
{
BSONElement e = _cmdobj.firstElement();
- if( e.type() == Object && string("query") == e.fieldName() ) {
+ if ( e.type() == Object && string("query") == e.fieldName() ) {
jsobj = e.embeddedObject();
}
else {
@@ -473,81 +530,81 @@ bool _runCommands(const char *ns, BSONObj& _cmdobj, stringstream& ss, BufBuilder
}
}
- bool ok = false;
- bool valid = false;
+ bool ok = false;
+ bool valid = false;
- BSONElement e;
- e = jsobj.firstElement();
+ BSONElement e;
+ e = jsobj.firstElement();
map<string,Command*>::iterator i;
- if( e.eoo() )
+ if ( e.eoo() )
;
- /* check for properly registered command objects. Note that all the commands below should be
+ /* check for properly registered command objects. Note that all the commands below should be
migrated over to the command object format.
*/
- else if( (i = commands->find(e.fieldName())) != commands->end() ) {
+ else if ( (i = commands->find(e.fieldName())) != commands->end() ) {
valid = true;
string errmsg;
Command *c = i->second;
- if( c->adminOnly() && !fromRepl && strncmp(ns, "admin", p-ns) != 0 ) {
+ if ( c->adminOnly() && !fromRepl && strncmp(ns, "admin", p-ns) != 0 ) {
ok = false;
- errmsg = "access denied";
+ errmsg = "access denied";
}
- else if( !isMaster() && !c->slaveOk() && !fromRepl ) {
+ else if ( !isMaster() && !c->slaveOk() && !fromRepl ) {
/* todo: allow if Option_SlaveOk was set on the query */
ok = false;
errmsg = "not master";
}
else {
ok = c->run(ns, jsobj, errmsg, anObjBuilder, fromRepl);
- if( ok && c->logTheOp() && !fromRepl )
+ if ( ok && c->logTheOp() && !fromRepl )
logOp("c", ns, jsobj);
}
- if( !ok )
+ if ( !ok )
anObjBuilder.append("errmsg", errmsg);
}
- else if( e.type() == String ) {
- /* { count: "collectionname"[, query: <query>] } */
- string us(ns, p-ns);
+ else if ( e.type() == String ) {
+ /* { count: "collectionname"[, query: <query>] } */
+ string us(ns, p-ns);
/* we allow clean and validate on slaves */
- if( strcmp( e.fieldName(), "clean") == 0 ) {
- valid = true;
- string dropNs = us + '.' + e.valuestr();
- NamespaceDetails *d = nsdetails(dropNs.c_str());
- log() << "CMD: clean " << dropNs << endl;
- if( d ) {
- ok = true;
- anObjBuilder.append("ns", dropNs.c_str());
- clean(dropNs.c_str(), d);
- }
- else {
- anObjBuilder.append("errmsg", "ns not found");
- }
- }
- else if( strcmp( e.fieldName(), "validate") == 0 ) {
- valid = true;
- string toValidateNs = us + '.' + e.valuestr();
- NamespaceDetails *d = nsdetails(toValidateNs.c_str());
- log() << "CMD: validate " << toValidateNs << endl;
- if( d ) {
- ok = true;
- anObjBuilder.append("ns", toValidateNs.c_str());
- string s = validateNS(toValidateNs.c_str(), d);
- anObjBuilder.append("result", s.c_str());
- }
- else {
- anObjBuilder.append("errmsg", "ns not found");
- }
- }
- }
-
- if( !valid )
- anObjBuilder.append("errmsg", "no such cmd");
- anObjBuilder.append("ok", ok?1.0:0.0);
- BSONObj x = anObjBuilder.done();
- b.append((void*) x.objdata(), x.objsize());
- return true;
+ if ( strcmp( e.fieldName(), "clean") == 0 ) {
+ valid = true;
+ string dropNs = us + '.' + e.valuestr();
+ NamespaceDetails *d = nsdetails(dropNs.c_str());
+ log() << "CMD: clean " << dropNs << endl;
+ if ( d ) {
+ ok = true;
+ anObjBuilder.append("ns", dropNs.c_str());
+ clean(dropNs.c_str(), d);
+ }
+ else {
+ anObjBuilder.append("errmsg", "ns not found");
+ }
+ }
+ else if ( strcmp( e.fieldName(), "validate") == 0 ) {
+ valid = true;
+ string toValidateNs = us + '.' + e.valuestr();
+ NamespaceDetails *d = nsdetails(toValidateNs.c_str());
+ log() << "CMD: validate " << toValidateNs << endl;
+ if ( d ) {
+ ok = true;
+ anObjBuilder.append("ns", toValidateNs.c_str());
+ string s = validateNS(toValidateNs.c_str(), d);
+ anObjBuilder.append("result", s.c_str());
+ }
+ else {
+ anObjBuilder.append("errmsg", "ns not found");
+ }
+ }
+ }
+
+ if ( !valid )
+ anObjBuilder.append("errmsg", "no such cmd");
+ anObjBuilder.append("ok", ok?1.0:0.0);
+ BSONObj x = anObjBuilder.done();
+ b.append((void*) x.objdata(), x.objsize());
+ return true;
}
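
_runCommands() above dispatches on the first field name of the command object through a global map that every Command subclass populates from its constructor. A standalone sketch of that self-registration pattern, with fake classes in place of Command/BSONObj:

    #include <iostream>
    #include <map>
    #include <string>

    class FakeCommand {
    public:
        const std::string name;
        FakeCommand(const std::string& n);
        virtual bool slaveOk() { return false; }
        virtual bool adminOnly() { return false; }
        virtual bool run(std::string& errmsg) = 0;
        virtual ~FakeCommand() {}
    };

    std::map<std::string, FakeCommand*>& commandMap() {
        static std::map<std::string, FakeCommand*> m;   // construct-on-first-use
        return m;
    }

    FakeCommand::FakeCommand(const std::string& n) : name(n) {
        commandMap()[n] = this;                         // self-registration, as in Command's ctor
    }

    class CmdPing : public FakeCommand {
    public:
        CmdPing() : FakeCommand("ping") {}
        bool slaveOk() { return true; }
        bool run(std::string&) { std::cout << "pong" << std::endl; return true; }
    } cmdPing;                                          // static instance registers itself

    bool runCommand(const std::string& firstFieldName) {
        auto it = commandMap().find(firstFieldName);
        if (it == commandMap().end()) {
            std::cout << "errmsg: no such cmd" << std::endl;
            return false;
        }
        std::string errmsg;
        bool ok = it->second->run(errmsg);
        if (!ok) std::cout << "errmsg: " << errmsg << std::endl;
        return ok;
    }

    int main() {
        runCommand("ping");      // dispatches to CmdPing::run
        runCommand("missing");   // "no such cmd"
    }
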
diff --git a/db/dbeval.cpp b/db/dbeval.cpp
index e345c78d089..1bd4841539f 100644
--- a/db/dbeval.cpp
+++ b/db/dbeval.cpp
@@ -3,16 +3,16 @@
*/
/**
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -33,71 +33,73 @@
const int edebug=0;
-bool dbEval(const char *ns, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
- BSONElement e = cmd.firstElement();
- assert( e.type() == Code || e.type() == CodeWScope );
- const char *code = e.type() == Code ? e.valuestr() : e.codeWScopeCode();
+bool dbEval(const char *ns, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
+ BSONElement e = cmd.firstElement();
+ assert( e.type() == Code || e.type() == CodeWScope );
+ const char *code = e.type() == Code ? e.valuestr() : e.codeWScopeCode();
+
+ if ( ! JavaJS ) {
+ errmsg = "db side execution is disabled";
+ return false;
+ }
- if ( ! JavaJS ) {
- errmsg = "db side execution is disabled";
- return false;
- }
-
#if !defined(NOJNI)
- jlong f = JavaJS->functionCreate(code);
- if( f == 0 ) {
- errmsg = "compile failed";
- return false;
- }
+ jlong f = JavaJS->functionCreate(code);
+ if ( f == 0 ) {
+ errmsg = "compile failed";
+ return false;
+ }
- Scope s;
- if ( e.type() == CodeWScope )
- s.init( e.codeWScopeScopeData() );
- s.setString("$client", database->name.c_str());
- BSONElement args = cmd.findElement("args");
- if( args.type() == Array ) {
- BSONObj eo = args.embeddedObject();
- if( edebug ) {
- cout << "args:" << eo.toString() << endl;
- cout << "code:\n" << code << endl;
- }
- s.setObject("args", eo);
- }
+ Scope s;
+ if ( e.type() == CodeWScope )
+ s.init( e.codeWScopeScopeData() );
+ s.setString("$client", database->name.c_str());
+ BSONElement args = cmd.findElement("args");
+ if ( args.type() == Array ) {
+ BSONObj eo = args.embeddedObject();
+ if ( edebug ) {
+ cout << "args:" << eo.toString() << endl;
+ cout << "code:\n" << code << endl;
+ }
+ s.setObject("args", eo);
+ }
- int res;
+ int res;
{
Timer t;
res = s.invoke(f);
int m = t.millis();
- if( m > 100 ) {
+ if ( m > 100 ) {
problem() << "dbeval time: " << dec << m << "ms " << ns << endl;
OCCASIONALLY log() << code << endl;
- else if( m >= 1000 ) log() << code << endl;
+ else if ( m >= 1000 ) log() << code << endl;
}
}
- if( res ) {
- result.append("errno", (double) res);
- errmsg = "invoke failed";
- return false;
- }
+ if ( res ) {
+ result.append("errno", (double) res);
+ errmsg = "invoke failed";
+ return false;
+ }
- int type = s.type("return");
- if( type == Object || type == Array )
- result.append("retval", s.getObject("return"));
- else if( type == NumberDouble )
- result.append("retval", s.getNumber("return"));
- else if( type == String )
- result.append("retval", s.getString("return").c_str());
- else if( type == Bool ) {
- result.appendBool("retval", s.getBoolean("return"));
- }
+ int type = s.type("return");
+ if ( type == Object || type == Array )
+ result.append("retval", s.getObject("return"));
+ else if ( type == NumberDouble )
+ result.append("retval", s.getNumber("return"));
+ else if ( type == String )
+ result.append("retval", s.getString("return").c_str());
+ else if ( type == Bool ) {
+ result.appendBool("retval", s.getBoolean("return"));
+ }
#endif
- return true;
+ return true;
}
-class CmdEval : public Command {
+class CmdEval : public Command {
public:
- virtual bool slaveOk() { return false; }
+ virtual bool slaveOk() {
+ return false;
+ }
CmdEval() : Command("$eval") { }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
return dbEval(ns, cmdObj, result, errmsg);
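
dbEval() above compiles the supplied JavaScript, invokes it, and then copies the scope's "return" slot into the command result under "retval", choosing the append call by the value's type. A toy sketch of that last mapping step, using fake scope/result types rather than the real Scope/BSONObjBuilder (Object/Array handling omitted):

    #include <iostream>
    #include <string>

    enum FakeType { NumberDouble, String, Bool };

    struct FakeScope {
        FakeType returnType = NumberDouble;
        double num = 42.0;
        std::string str;
        bool b = false;
    };

    struct FakeResultBuilder {
        void append(const std::string& k, double v)             { std::cout << k << ": " << v << "\n"; }
        void append(const std::string& k, const std::string& v) { std::cout << k << ": " << v << "\n"; }
        void appendBool(const std::string& k, bool v)            { std::cout << k << ": " << (v ? "true" : "false") << "\n"; }
    };

    void appendReturnValue(FakeScope& s, FakeResultBuilder& result) {
        if (s.returnType == NumberDouble)
            result.append("retval", s.num);
        else if (s.returnType == String)
            result.append("retval", s.str);
        else if (s.returnType == Bool)
            result.appendBool("retval", s.b);
        // Object/Array would be appended as sub-objects in the real code
    }

    int main() {
        FakeScope s;
        FakeResultBuilder result;
        appendReturnValue(s, result);   // prints "retval: 42"
    }
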
diff --git a/db/dbhelpers.cpp b/db/dbhelpers.cpp
index a5e0e14d6c6..21ceb981bcc 100644
--- a/db/dbhelpers.cpp
+++ b/db/dbhelpers.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -21,16 +21,16 @@
#include "dbhelpers.h"
#include "query.h"
-/* Get the first object from a collection. Generally only useful if the collection
+/* Get the first object from a collection. Generally only useful if the collection
   only ever has a single object -- which is a "singleton collection".
- Returns: true if object exists.
+ Returns: true if object exists.
*/
bool getSingleton(const char *ns, BSONObj& result) {
DBContext context(ns);
auto_ptr<Cursor> c = DataFileMgr::findAll(ns);
- if( !c->ok() )
+ if ( !c->ok() )
return false;
result = c->current();
@@ -43,7 +43,7 @@ void putSingleton(const char *ns, BSONObj obj) {
updateObjects(ns, obj, /*pattern=*/emptyObj, /*upsert=*/true, ss);
}
-void emptyCollection(const char *ns) {
+void emptyCollection(const char *ns) {
DBContext context(ns);
deleteObjects(ns, emptyObj, false);
}
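
The dbhelpers above treat a namespace as a singleton collection: getSingleton reads the one document if present, putSingleton upserts it against an empty pattern, and emptyCollection deletes everything. A toy model with a plain map standing in for collections and BSON:

    #include <iostream>
    #include <map>
    #include <string>

    std::map<std::string, std::string> gCollections;   // ns -> the single "document"

    bool getSingletonModel(const std::string& ns, std::string& result) {
        auto it = gCollections.find(ns);
        if (it == gCollections.end())
            return false;              // mirrors "!c->ok()": no object exists
        result = it->second;
        return true;
    }

    void putSingletonModel(const std::string& ns, const std::string& obj) {
        gCollections[ns] = obj;        // upsert with an empty pattern: replace whatever is there
    }

    void emptyCollectionModel(const std::string& ns) {
        gCollections.erase(ns);        // deleteObjects with an empty pattern removes everything
    }

    int main() {
        std::string doc;
        std::cout << getSingletonModel("local.sources", doc) << std::endl;            // 0
        putSingletonModel("local.sources", "{ host: 'example', ... }");               // hypothetical doc
        std::cout << getSingletonModel("local.sources", doc) << " " << doc << std::endl;
        emptyCollectionModel("local.sources");
    }
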
diff --git a/db/dbhelpers.h b/db/dbhelpers.h
index c169b4bd34d..9464e162c6e 100644
--- a/db/dbhelpers.h
+++ b/db/dbhelpers.h
@@ -2,39 +2,39 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/* db helpers are helper functions and classes that let us easily manipulate the local
+/* db helpers are helper functions and classes that let us easily manipulate the local
database instance.
*/
#pragma once
-/* Get/put the first object from a collection. Generally only useful if the collection
+/* Get/put the first object from a collection. Generally only useful if the collection
only ever has a single object -- which is a "singleton collection".
You do not need to set the database before calling.
- Returns: true if object exists.
+ Returns: true if object exists.
*/
bool getSingleton(const char *ns, BSONObj& result);
void putSingleton(const char *ns, BSONObj obj);
-/* Remove all objects from a collection.
+/* Remove all objects from a collection.
You do not need to set the database before calling.
*/
void emptyCollection(const char *ns);
@@ -42,16 +42,18 @@ void emptyCollection(const char *ns);
/* Set database we want to use, then, restores when we finish (are out of scope)
   Note this is also helpful if an exception happens, as the state is fixed up.
*/
-class DBContext {
+class DBContext {
Database *old;
public:
- DBContext(const char *ns) {
+ DBContext(const char *ns) {
old = database;
setClientTempNs(ns);
}
- DBContext(string ns) {
+ DBContext(string ns) {
old = database;
setClientTempNs(ns.c_str());
}
- ~DBContext() { database = old; }
+ ~DBContext() {
+ database = old;
+ }
};
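
DBContext above is a save/switch/restore guard: it remembers the current database pointer, switches via setClientTempNs, and restores on scope exit even if an exception unwinds. A minimal sketch of the same idiom, with a global string standing in for the database pointer and a crude stand-in for nsToClient:

    #include <iostream>
    #include <string>

    std::string gCurrentDb = "admin";

    struct DbContextModel {
        std::string old;
        DbContextModel(const std::string& ns) : old(gCurrentDb) {
            gCurrentDb = ns.substr(0, ns.find('.'));   // crude nsToClient(): text before the first dot
        }
        ~DbContextModel() { gCurrentDb = old; }        // restore even if an exception is thrown
    };

    int main() {
        {
            DbContextModel ctx("local.sources");
            std::cout << "inside: " << gCurrentDb << std::endl;   // local
        }
        std::cout << "after: " << gCurrentDb << std::endl;        // admin
    }
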
diff --git a/db/dbinfo.cpp b/db/dbinfo.cpp
index 9c8300729ae..79cbea23bc7 100644
--- a/db/dbinfo.cpp
+++ b/db/dbinfo.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -19,8 +19,8 @@
#include "stdafx.h"
#include "db.h"
-void DBInfo::setHaveLogged() {
- if( haveLogged() )
+void DBInfo::setHaveLogged() {
+ if ( haveLogged() )
return;
NamespaceDetails *d = nsdetails(ns.c_str());
@@ -33,12 +33,12 @@ void DBInfo::setHaveLogged() {
int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool god);
-void DBInfo::dbDropped() {
+void DBInfo::dbDropped() {
BSONObj empty;
deleteObjects(ns.c_str(), empty, false, false);
- /* do we also need to clear the info in 'dbs' in local.sources if we
-are a slave?
+ /* do we also need to clear the info in 'dbs' in local.sources if we
+ are a slave?
TODO if so. need to be careful not to mess up replications of dropDatabase().
*/
}
diff --git a/db/dbinfo.h b/db/dbinfo.h
index cb06595a6ab..cfd16d64b1f 100644
--- a/db/dbinfo.h
+++ b/db/dbinfo.h
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -24,32 +24,34 @@
{ haveLogged : true }
- haveLogged -- if true, we have already logged events to the oplog for this
+ haveLogged -- if true, we have already logged events to the oplog for this
database. missing implies false.
other attributes will be added later.
Note that class Database caches the DBInfo::haveLogged() value to keep things fast.
*/
-class DBInfo {
+class DBInfo {
string ns;
Database *dbold;
public:
- ~DBInfo() { database = dbold; }
- DBInfo(const char *db) {
+ ~DBInfo() {
+ database = dbold;
+ }
+ DBInfo(const char *db) {
dbold = database;
ns = string("local.dbinfo.") + db;
setClientTempNs(ns.c_str());
}
- BSONObj getDbInfoObj() {
+ BSONObj getDbInfoObj() {
auto_ptr<Cursor> c = DataFileMgr::findAll(ns.c_str());
- if( !c->ok() )
+ if ( !c->ok() )
return BSONObj();
return c->current();
}
- bool haveLogged() {
+ bool haveLogged() {
return getDbInfoObj().getBoolField("haveLogged");
}
@@ -57,14 +59,14 @@ public:
void dbDropped();
};
-inline void Database::setHaveLogged() {
- if( _haveLogged ) return;
+inline void Database::setHaveLogged() {
+ if ( _haveLogged ) return;
DBInfo i(name.c_str());
i.setHaveLogged();
_haveLogged = true;
}
-inline void Database::finishInit() {
+inline void Database::finishInit() {
DBInfo i(name.c_str());
_haveLogged = i.haveLogged();
}
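
These two inline Database methods exist to keep the haveLogged flag cheap: finishInit() reads local.dbinfo.<name> once when the database is opened and caches the answer in _haveLogged, while setHaveLogged() only falls through to DBInfo (and therefore to a collection write) the first time the cached flag is false. Roughly, per Database object (a sketch, not code from the tree):

    db->finishInit();      // one findAll() over local.dbinfo.<name>; caches _haveLogged
    db->setHaveLogged();   // first call while false: DBInfo::setHaveLogged() persists the flag
    db->setHaveLogged();   // later calls return immediately on the cached _haveLogged
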
diff --git a/db/dbmessage.h b/db/dbmessage.h
index b7e3bc22128..8076f73fa79 100644
--- a/db/dbmessage.h
+++ b/db/dbmessage.h
@@ -1,15 +1,15 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -18,96 +18,102 @@
#include "jsobj.h"
#include "namespace.h"
-/* For the database/server protocol, these objects and functions encapsulate
+/* For the database/server protocol, these objects and functions encapsulate
the various messages transmitted over the connection.
*/
class DbMessage {
public:
- DbMessage(const Message& _m) : m(_m) {
- theEnd = _m.data->_data + _m.data->dataLen();
- int *r = (int *) _m.data->_data;
- reserved = *r;
- r++;
- data = (const char *) r;
- nextjsobj = data;
- }
-
- const char * getns() { return data; }
- void getns(Namespace& ns) {
- ns = data;
- }
-
- int pullInt() {
- if( nextjsobj == data )
- nextjsobj += strlen(data) + 1; // skip namespace
- int i = *((int *)nextjsobj);
- nextjsobj += 4;
- return i;
- }
- long long pullInt64() {
- if( nextjsobj == data )
- nextjsobj += strlen(data) + 1; // skip namespace
- long long i = *((long long *)nextjsobj);
- nextjsobj += 8;
- return i;
- }
-
- OID* getOID() {
- return (OID *) (data + strlen(data) + 1); // skip namespace
- }
-
- void getQueryStuff(const char *&query, int& ntoreturn) {
- int *i = (int *) (data + strlen(data) + 1);
- ntoreturn = *i;
- i++;
- query = (const char *) i;
- }
-
- /* for insert and update msgs */
- bool moreJSObjs() { return nextjsobj != 0; }
- BSONObj nextJsObj() {
- if( nextjsobj == data )
- nextjsobj += strlen(data) + 1; // skip namespace
- BSONObj js(nextjsobj);
- assert( js.objsize() < ( theEnd - data ) );
- if( js.objsize() <= 0 )
- nextjsobj = null;
- else {
- nextjsobj += js.objsize();
- if( nextjsobj >= theEnd )
- nextjsobj = 0;
- }
- return js;
- }
-
- const Message& msg() { return m; }
+ DbMessage(const Message& _m) : m(_m) {
+ theEnd = _m.data->_data + _m.data->dataLen();
+ int *r = (int *) _m.data->_data;
+ reserved = *r;
+ r++;
+ data = (const char *) r;
+ nextjsobj = data;
+ }
+
+ const char * getns() {
+ return data;
+ }
+ void getns(Namespace& ns) {
+ ns = data;
+ }
+
+ int pullInt() {
+ if ( nextjsobj == data )
+ nextjsobj += strlen(data) + 1; // skip namespace
+ int i = *((int *)nextjsobj);
+ nextjsobj += 4;
+ return i;
+ }
+ long long pullInt64() {
+ if ( nextjsobj == data )
+ nextjsobj += strlen(data) + 1; // skip namespace
+ long long i = *((long long *)nextjsobj);
+ nextjsobj += 8;
+ return i;
+ }
+
+ OID* getOID() {
+ return (OID *) (data + strlen(data) + 1); // skip namespace
+ }
+
+ void getQueryStuff(const char *&query, int& ntoreturn) {
+ int *i = (int *) (data + strlen(data) + 1);
+ ntoreturn = *i;
+ i++;
+ query = (const char *) i;
+ }
+
+ /* for insert and update msgs */
+ bool moreJSObjs() {
+ return nextjsobj != 0;
+ }
+ BSONObj nextJsObj() {
+ if ( nextjsobj == data )
+ nextjsobj += strlen(data) + 1; // skip namespace
+ BSONObj js(nextjsobj);
+ assert( js.objsize() < ( theEnd - data ) );
+ if ( js.objsize() <= 0 )
+ nextjsobj = null;
+ else {
+ nextjsobj += js.objsize();
+ if ( nextjsobj >= theEnd )
+ nextjsobj = 0;
+ }
+ return js;
+ }
+
+ const Message& msg() {
+ return m;
+ }
private:
- const Message& m;
- int reserved;
- const char *data;
- const char *nextjsobj;
- const char *theEnd;
+ const Message& m;
+ int reserved;
+ const char *data;
+ const char *nextjsobj;
+ const char *theEnd;
};
/* a request to run a query, received from the database */
-class QueryMessage {
+class QueryMessage {
public:
const char *ns;
int ntoskip;
int ntoreturn;
int queryOptions;
BSONObj query;
- auto_ptr< set<string> > fields;
+ auto_ptr< set<string> > fields;
/* parses the message into the above fields */
- QueryMessage(DbMessage& d) {
+ QueryMessage(DbMessage& d) {
ns = d.getns();
ntoskip = d.pullInt();
ntoreturn = d.pullInt();
query = d.nextJsObj();
- if( d.moreJSObjs() ) {
+ if ( d.moreJSObjs() ) {
fields = auto_ptr< set<string> >(new set<string>());
d.nextJsObj().getFieldNames(*fields);
}
@@ -115,13 +121,13 @@ public:
}
};
-#include "../client/dbclient.h"
+#include "../client/dbclient.h"
inline void replyToQuery(MessagingPort& p, Message& requestMsg,
void *data, int size,
- int nReturned, int startingFrom = 0,
+ int nReturned, int startingFrom = 0,
long long cursorId = 0
- ) {
+ ) {
BufBuilder b(32768);
b.skip(sizeof(QueryResult));
b.append(data, size);
@@ -144,9 +150,9 @@ inline void replyToQuery(MessagingPort& p, Message& requestMsg,
//#include "bsonobj.h"
inline void replyToQuery(MessagingPort& p, Message& requestMsg,
- BSONObj& responseObj)
+ BSONObj& responseObj)
{
replyToQuery(
- p, requestMsg,
+ p, requestMsg,
(void *) responseObj.objdata(), responseObj.objsize(), 1);
}
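
Taken together, DbMessage walks the raw wire buffer in place (namespace string, then fixed-width ints, then BSON objects), QueryMessage names the query fields, and replyToQuery() packages a BSONObj back into an opReply. A hypothetical command-style handler wiring the three together (handlePing and its reply field are invented; only the DbMessage, QueryMessage, BSONObjBuilder, and replyToQuery calls come from this tree):

    void handlePing(MessagingPort& p, Message& m) {
        DbMessage d(m);
        QueryMessage q(d);            // fills q.ns, q.ntoskip, q.ntoreturn, q.query
        BSONObjBuilder b;
        b.append("ok", 1.0);
        BSONObj res = b.done();
        replyToQuery(p, m, res);      // sends it back as a single-document opReply
    }
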
diff --git a/db/dbwebserver.cpp b/db/dbwebserver.cpp
index ea9d48c39d7..6741b52b5f6 100644
--- a/db/dbwebserver.cpp
+++ b/db/dbwebserver.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -30,7 +30,7 @@ bool getInitialSyncCompleted();
time_t started = time(0);
/*
- string toString() {
+ string toString() {
stringstream ss;
unsigned long long dt = last - start;
ss << dt/1000;
@@ -42,8 +42,10 @@ time_t started = time(0);
}
*/
-struct Timing {
- Timing() { start = timeLocked = 0; }
+struct Timing {
+ Timing() {
+ start = timeLocked = 0;
+ }
unsigned long long start, timeLocked;
};
Timing tlast;
@@ -54,7 +56,7 @@ extern bool quiet;
void statsThread() {
unsigned long long timeLastPass = 0;
- while( 1 ) {
+ while ( 1 ) {
{
Timer lktm;
dblock lk;
@@ -62,17 +64,17 @@ void statsThread() {
Timing timing;
dbMutexInfo.timingInfo(timing.start, timing.timeLocked);
unsigned long long now = curTimeMicros64();
- if( timeLastPass ) {
+ if ( timeLastPass ) {
unsigned long long dt = now - timeLastPass;
unsigned long long dlocked = timing.timeLocked - tlast.timeLocked;
{
stringstream ss;
ss << dt / 1000 << '\t';
ss << dlocked / 1000 << '\t';
- if( dt )
+ if ( dt )
ss << (dlocked*100)/dt << '%';
string s = ss.str();
- if( !quiet )
+ if ( !quiet )
log() << "cpu: " << s << endl;
lockStats[q] = s;
}
@@ -87,18 +89,20 @@ void statsThread() {
unsigned byLocSize();
bool _bold;
-string bold(bool x) {
+string bold(bool x) {
_bold = x;
return x ? "<b>" : "";
}
-string bold() { return _bold ? "</b>" : ""; }
+string bold() {
+ return _bold ? "</b>" : "";
+}
-class DbWebServer : public MiniWebServer {
+class DbWebServer : public MiniWebServer {
public:
// caller locks
- void doLockedStuff(stringstream& ss) {
+ void doLockedStuff(stringstream& ss) {
ss << "# databases: " << databases.size() << '\n';
- if( database ) {
+ if ( database ) {
ss << "curclient: " << database->name;
ss << '\n';
}
@@ -106,37 +110,37 @@ public:
ss << "\n<b>replication</b>\n";
ss << "master: " << master << '\n';
ss << "slave: " << slave << '\n';
- if( replPair ) {
+ if ( replPair ) {
ss << "replpair:\n";
ss << replPair->getInfo();
}
bool seemCaughtUp = getInitialSyncCompleted();
- if( !seemCaughtUp ) ss << "<b>";
+ if ( !seemCaughtUp ) ss << "<b>";
ss << "initialSyncCompleted: " << seemCaughtUp;
- if( !seemCaughtUp ) ss << "</b>";
+ if ( !seemCaughtUp ) ss << "</b>";
ss << '\n';
ss << "\n<b>dt\ttlocked</b>\n";
unsigned i = q;
- while( 1 ) {
+ while ( 1 ) {
ss << lockStats[i] << '\n';
i = (i-1)%NStats;
- if( i == q )
+ if ( i == q )
break;
}
}
- void doUnlockedStuff(stringstream& ss) {
+ void doUnlockedStuff(stringstream& ss) {
ss << "port: " << port << '\n';
ss << "dblocked: " << dbMutexInfo.isLocked() << " (initial)\n";
ss << "uptime: " << time(0)-started << " seconds\n";
- if( allDead )
+ if ( allDead )
ss << "<b>replication allDead=" << allDead << "</b>\n";
ss << "\nassertions:\n";
- for( int i = 0; i < 4; i++ ) {
- if( lastAssert[i].isSet() ) {
+ for ( int i = 0; i < 4; i++ ) {
+ if ( lastAssert[i].isSet() ) {
ss << "<b>";
- if( i == 3 ) ss << "usererr";
+ if ( i == 3 ) ss << "usererr";
else ss << i;
ss << "</b>" << ' ' << lastAssert[i].toString();
}
@@ -149,10 +153,10 @@ public:
const char *rq, // the full request
string url,
// set these and return them:
- string& responseMsg,
+ string& responseMsg,
int& responseCode,
vector<string>& headers // if completely empty, content-type: text/html will be added
- )
+ )
{
responseCode = 200;
stringstream ss;
@@ -169,9 +173,9 @@ public:
doUnlockedStuff(ss);
int n = 2000;
- Timer t;
- while( 1 ) {
- if( !dbMutexInfo.isLocked() ) {
+ Timer t;
+ while ( 1 ) {
+ if ( !dbMutexInfo.isLocked() ) {
{
dblock lk;
ss << "time to get dblock: " << t.millis() << "ms\n";
@@ -180,7 +184,7 @@ public:
break;
}
sleepmillis(1);
- if( --n < 0 ) {
+ if ( --n < 0 ) {
ss << "\n<b>timed out getting dblock</b>\n";
break;
}
@@ -194,6 +198,6 @@ public:
void webServerThread() {
boost::thread thr(statsThread);
DbWebServer mini;
- if( mini.init(port+1000) )
+ if ( mini.init(port+1000) )
mini.run();
}
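
The cpu line that statsThread logs is plain interval arithmetic over curTimeMicros64() deltas, reported in milliseconds plus a lock percentage. A worked example with invented numbers:

    unsigned long long dt      = 4000000;   // 4s between passes, in microseconds
    unsigned long long dlocked = 1000000;   // 1s of that spent holding the db lock
    // logged as "dt\ttlocked":
    //   dt / 1000            -> 4000  (interval, ms)
    //   dlocked / 1000       -> 1000  (time locked, ms)
    //   (dlocked*100) / dt   -> 25    (percent of the interval spent locked)
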
diff --git a/db/instance.cpp b/db/instance.cpp
index edef60e7916..265d147f742 100644
--- a/db/instance.cpp
+++ b/db/instance.cpp
@@ -3,16 +3,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -26,7 +26,7 @@
#include "instance.h"
int nloggedsome = 0;
-#define LOGSOME if( ++nloggedsome < 1000 || nloggedsome % 100 == 0 )
+#define LOGSOME if( ++nloggedsome < 1000 || nloggedsome % 100 == 0 )
bool objcheck = false;
bool quota = false;
@@ -39,236 +39,240 @@ MutexInfo dbMutexInfo;
//int dbLocked = 0;
int port = DBPort;
-/* 0 = off; 1 = writes, 2 = reads, 3 = both
+/* 0 = off; 1 = writes, 2 = reads, 3 = both
7 = log a few reads, and all writes.
*/
int opLogging = 0;
-int getOpLogging() { return opLogging; }
+int getOpLogging() {
+ return opLogging;
+}
OpLog _oplog;
//#define oplog (*(_oplog.f))
bool useCursors = true;
void closeAllSockets();
-void flushOpLog() { _oplog.flush(); }
+void flushOpLog() {
+ _oplog.flush();
+}
int ctr = 0;
bool quiet = false;
// Returns false when request includes 'end'
bool assembleResponse( Message &m, DbResponse &dbresponse ) {
- dblock lk;
-
- stringstream ss;
- char buf[64];
- time_t_to_String(time(0), buf);
- buf[20] = 0; // don't want the year
- ss << buf;
- // ss << curTimeMillis() % 10000 << ' ';
-
- Timer t;
- database = 0;
- curOp = 0;
-
- int ms;
- bool log = false;
- curOp = m.data->operation();
-
+ dblock lk;
+
+ stringstream ss;
+ char buf[64];
+ time_t_to_String(time(0), buf);
+ buf[20] = 0; // don't want the year
+ ss << buf;
+ // ss << curTimeMillis() % 10000 << ' ';
+
+ Timer t;
+ database = 0;
+ curOp = 0;
+
+ int ms;
+ bool log = false;
+ curOp = m.data->operation();
+
#if 0
- /* use this if you only want to process operations for a particular namespace.
- maybe add to cmd line parms or something fancier.
- */
- DbMessage ddd(m);
- if( strncmp(ddd.getns(), "clusterstock", 12) != 0 ) {
- static int q;
- if( ++q < 20 )
- cout << "TEMP skip " << ddd.getns() << endl;
- goto skip;
- }
+ /* use this if you only want to process operations for a particular namespace.
+ maybe add to cmd line parms or something fancier.
+ */
+ DbMessage ddd(m);
+ if ( strncmp(ddd.getns(), "clusterstock", 12) != 0 ) {
+ static int q;
+ if ( ++q < 20 )
+ cout << "TEMP skip " << ddd.getns() << endl;
+ goto skip;
+ }
#endif
-
- if( m.data->operation() == dbMsg ) {
- ss << "msg ";
- char *p = m.data->_data;
- int len = strlen(p);
- if( len > 400 )
- cout << curTimeMillis() % 10000 <<
- " long msg received, len:" << len <<
- " ends with: " << p + len - 10 << endl;
- bool end = strcmp("end", p) == 0;
- Message *resp = new Message();
- resp->setData(opReply, "i am fine");
- dbresponse.response = resp;
- dbresponse.responseTo = m.data->id;
- //dbMsgPort.reply(m, resp);
- if( end )
- return false;
- }
- else if( m.data->operation() == dbQuery ) {
- receivedQuery(dbresponse, m, ss, true);
- }
- else if( m.data->operation() == dbInsert ) {
- OPWRITE;
- try {
- ss << "insert ";
- receivedInsert(m, ss);
- }
- catch( AssertionException& e ) {
+
+ if ( m.data->operation() == dbMsg ) {
+ ss << "msg ";
+ char *p = m.data->_data;
+ int len = strlen(p);
+ if ( len > 400 )
+ cout << curTimeMillis() % 10000 <<
+ " long msg received, len:" << len <<
+ " ends with: " << p + len - 10 << endl;
+ bool end = strcmp("end", p) == 0;
+ Message *resp = new Message();
+ resp->setData(opReply, "i am fine");
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.data->id;
+ //dbMsgPort.reply(m, resp);
+ if ( end )
+ return false;
+ }
+ else if ( m.data->operation() == dbQuery ) {
+ receivedQuery(dbresponse, m, ss, true);
+ }
+ else if ( m.data->operation() == dbInsert ) {
+ OPWRITE;
+ try {
+ ss << "insert ";
+ receivedInsert(m, ss);
+ }
+ catch ( AssertionException& e ) {
LOGSOME problem() << " Caught Assertion insert, continuing\n";
- ss << " exception " + e.toString();
- }
- }
- else if( m.data->operation() == dbUpdate ) {
- OPWRITE;
- try {
- ss << "update ";
- receivedUpdate(m, ss);
- }
- catch( AssertionException& e ) {
- LOGSOME problem() << " Caught Assertion update, continuing" << endl;
- ss << " exception " + e.toString();
- }
- }
- else if( m.data->operation() == dbDelete ) {
- OPWRITE;
- try {
- ss << "remove ";
- receivedDelete(m);
- }
- catch( AssertionException& e ) {
- LOGSOME problem() << " Caught Assertion receivedDelete, continuing" << endl;
- ss << " exception " + e.toString();
- }
- }
- else if( m.data->operation() == dbGetMore ) {
- OPREAD;
- DEV log = true;
- ss << "getmore ";
- receivedGetMore(dbresponse, m, ss);
- }
- else if( m.data->operation() == dbKillCursors ) {
- OPREAD;
- try {
- log = true;
- ss << "killcursors ";
- receivedKillCursors(m);
- }
- catch( AssertionException& e ) {
- problem() << " Caught Assertion in kill cursors, continuing" << endl;
- ss << " exception " + e.toString();
- }
- }
- else {
- cout << " operation isn't supported: " << m.data->operation() << endl;
- assert(false);
- }
-
- ms = t.millis();
- log = log || (ctr++ % 512 == 0 && !quiet);
- DEV log = true;
- if( log || ms > 100 ) {
- ss << ' ' << t.millis() << "ms";
- cout << ss.str().c_str() << endl;
- }
- if( database && database->profile >= 1 ) {
- if( database->profile >= 2 || ms >= 100 ) {
- // profile it
- profile(ss.str().c_str()+20/*skip ts*/, ms);
- }
- }
-
- return true;
+ ss << " exception " + e.toString();
+ }
+ }
+ else if ( m.data->operation() == dbUpdate ) {
+ OPWRITE;
+ try {
+ ss << "update ";
+ receivedUpdate(m, ss);
+ }
+ catch ( AssertionException& e ) {
+ LOGSOME problem() << " Caught Assertion update, continuing" << endl;
+ ss << " exception " + e.toString();
+ }
+ }
+ else if ( m.data->operation() == dbDelete ) {
+ OPWRITE;
+ try {
+ ss << "remove ";
+ receivedDelete(m);
+ }
+ catch ( AssertionException& e ) {
+ LOGSOME problem() << " Caught Assertion receivedDelete, continuing" << endl;
+ ss << " exception " + e.toString();
+ }
+ }
+ else if ( m.data->operation() == dbGetMore ) {
+ OPREAD;
+ DEV log = true;
+ ss << "getmore ";
+ receivedGetMore(dbresponse, m, ss);
+ }
+ else if ( m.data->operation() == dbKillCursors ) {
+ OPREAD;
+ try {
+ log = true;
+ ss << "killcursors ";
+ receivedKillCursors(m);
+ }
+ catch ( AssertionException& e ) {
+ problem() << " Caught Assertion in kill cursors, continuing" << endl;
+ ss << " exception " + e.toString();
+ }
+ }
+ else {
+ cout << " operation isn't supported: " << m.data->operation() << endl;
+ assert(false);
+ }
+
+ ms = t.millis();
+ log = log || (ctr++ % 512 == 0 && !quiet);
+ DEV log = true;
+ if ( log || ms > 100 ) {
+ ss << ' ' << t.millis() << "ms";
+ cout << ss.str().c_str() << endl;
+ }
+ if ( database && database->profile >= 1 ) {
+ if ( database->profile >= 2 || ms >= 100 ) {
+ // profile it
+ profile(ss.str().c_str()+20/*skip ts*/, ms);
+ }
+ }
+
+ return true;
}
void killCursors(int n, long long *ids);
void receivedKillCursors(Message& m) {
- int *x = (int *) m.data->_data;
- x++; // reserved
- int n = *x++;
- assert( n >= 1 );
- if( n > 2000 ) {
- problem() << "Assertion failure, receivedKillCursors, n=" << n << endl;
- assert( n < 30000 );
- }
- killCursors(n, (long long *) x);
+ int *x = (int *) m.data->_data;
+ x++; // reserved
+ int n = *x++;
+ assert( n >= 1 );
+ if ( n > 2000 ) {
+ problem() << "Assertion failure, receivedKillCursors, n=" << n << endl;
+ assert( n < 30000 );
+ }
+ killCursors(n, (long long *) x);
}
void closeClient( const char *cl, const char *path ) {
/* reset haveLogged in local.dbinfo */
- if( string("local") != cl ) {
+ if ( string("local") != cl ) {
DBInfo i(cl);
i.dbDropped();
}
-
- /* important: kill all open cursors on the database */
- string prefix(cl);
- prefix += '.';
- ClientCursor::invalidate(prefix.c_str());
-
- eraseDatabase( cl, path );
- delete database; // closes files
- database = 0;
+
+ /* important: kill all open cursors on the database */
+ string prefix(cl);
+ prefix += '.';
+ ClientCursor::invalidate(prefix.c_str());
+
+ eraseDatabase( cl, path );
+ delete database; // closes files
+ database = 0;
}
void receivedUpdate(Message& m, stringstream& ss) {
- DbMessage d(m);
- const char *ns = d.getns();
- assert(*ns);
- setClient(ns);
- //if( database->profile )
+ DbMessage d(m);
+ const char *ns = d.getns();
+ assert(*ns);
+ setClient(ns);
+ //if( database->profile )
ss << ns << ' ';
- int flags = d.pullInt();
- BSONObj query = d.nextJsObj();
+ int flags = d.pullInt();
+ BSONObj query = d.nextJsObj();
- assert( d.moreJSObjs() );
- assert( query.objsize() < m.data->dataLen() );
- BSONObj toupdate = d.nextJsObj();
+ assert( d.moreJSObjs() );
+ assert( query.objsize() < m.data->dataLen() );
+ BSONObj toupdate = d.nextJsObj();
- assert( toupdate.objsize() < m.data->dataLen() );
- assert( query.objsize() + toupdate.objsize() < m.data->dataLen() );
- updateObjects(ns, toupdate, query, flags & 1, ss);
+ assert( toupdate.objsize() < m.data->dataLen() );
+ assert( query.objsize() + toupdate.objsize() < m.data->dataLen() );
+ updateObjects(ns, toupdate, query, flags & 1, ss);
}
void receivedDelete(Message& m) {
- DbMessage d(m);
- const char *ns = d.getns();
- assert(*ns);
- setClient(ns);
- int flags = d.pullInt();
- bool justOne = flags & 1;
- assert( d.moreJSObjs() );
- BSONObj pattern = d.nextJsObj();
- deleteObjects(ns, pattern, justOne);
- logOp("d", ns, pattern, 0, &justOne);
+ DbMessage d(m);
+ const char *ns = d.getns();
+ assert(*ns);
+ setClient(ns);
+ int flags = d.pullInt();
+ bool justOne = flags & 1;
+ assert( d.moreJSObjs() );
+ BSONObj pattern = d.nextJsObj();
+ deleteObjects(ns, pattern, justOne);
+ logOp("d", ns, pattern, 0, &justOne);
}
void receivedQuery(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss, bool logit) {
- MSGID responseTo = m.data->id;
+ MSGID responseTo = m.data->id;
- DbMessage d(m);
+ DbMessage d(m);
QueryMessage q(d);
- if( opLogging && logit ) {
- if( strstr(q.ns, ".$cmd") ) {
- /* $cmd queries are "commands" and usually best treated as write operations */
- OPWRITE;
- }
- else {
- OPREAD;
- }
- }
-
- setClient(q.ns);
- QueryResult* msgdata;
-
- try {
- msgdata = runQuery(m, q.ns, q.ntoskip, q.ntoreturn, q.query, q.fields, ss, q.queryOptions);
- }
- catch( AssertionException& e ) {
- ss << " exception ";
- LOGSOME problem() << " Caught Assertion in runQuery ns:" << q.ns << ' ' << e.toString() << '\n';
- log() << " ntoskip:" << q.ntoskip << " ntoreturn:" << q.ntoreturn << '\n';
- if( q.query.valid() )
+ if ( opLogging && logit ) {
+ if ( strstr(q.ns, ".$cmd") ) {
+ /* $cmd queries are "commands" and usually best treated as write operations */
+ OPWRITE;
+ }
+ else {
+ OPREAD;
+ }
+ }
+
+ setClient(q.ns);
+ QueryResult* msgdata;
+
+ try {
+ msgdata = runQuery(m, q.ns, q.ntoskip, q.ntoreturn, q.query, q.fields, ss, q.queryOptions);
+ }
+ catch ( AssertionException& e ) {
+ ss << " exception ";
+ LOGSOME problem() << " Caught Assertion in runQuery ns:" << q.ns << ' ' << e.toString() << '\n';
+ log() << " ntoskip:" << q.ntoskip << " ntoreturn:" << q.ntoreturn << '\n';
+ if ( q.query.valid() )
log() << " query:" << q.query.toString() << endl;
else
log() << " query object is not valid!" << endl;
@@ -285,95 +289,95 @@ void receivedQuery(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, *
// though to do that.
msgdata = (QueryResult *) b.buf();
b.decouple();
- QueryResult *qr = msgdata;
- qr->_data[0] = 0;
- qr->_data[1] = 0;
- qr->_data[2] = 0;
- qr->_data[3] = 0;
- qr->len = b.len();
- qr->setOperation(opReply);
- qr->cursorId = 0;
- qr->startingFrom = 0;
- qr->nReturned = 1;
-
- }
- Message *resp = new Message();
- resp->setData(msgdata, true); // transport will free
- dbresponse.response = resp;
- dbresponse.responseTo = responseTo;
- if( database ) {
- if( database->profile )
- ss << " bytes:" << resp->data->dataLen();
- }
- else {
- if( strstr(q.ns, "$cmd") == 0 ) // (this condition is normal for $cmd dropDatabase)
- log() << "ERROR: receiveQuery: database is null; ns=" << q.ns << endl;
- }
- // dbMsgPort.reply(m, resp, responseTo);
+ QueryResult *qr = msgdata;
+ qr->_data[0] = 0;
+ qr->_data[1] = 0;
+ qr->_data[2] = 0;
+ qr->_data[3] = 0;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->cursorId = 0;
+ qr->startingFrom = 0;
+ qr->nReturned = 1;
+
+ }
+ Message *resp = new Message();
+ resp->setData(msgdata, true); // transport will free
+ dbresponse.response = resp;
+ dbresponse.responseTo = responseTo;
+ if ( database ) {
+ if ( database->profile )
+ ss << " bytes:" << resp->data->dataLen();
+ }
+ else {
+ if ( strstr(q.ns, "$cmd") == 0 ) // (this condition is normal for $cmd dropDatabase)
+ log() << "ERROR: receiveQuery: database is null; ns=" << q.ns << endl;
+ }
+ // dbMsgPort.reply(m, resp, responseTo);
}
QueryResult* emptyMoreResult(long long);
void receivedGetMore(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss) {
- DbMessage d(m);
- const char *ns = d.getns();
- ss << ns;
- setClient(ns);
- int ntoreturn = d.pullInt();
- long long cursorid = d.pullInt64();
- ss << " cid:" << cursorid;
- ss << " ntoreturn:" << ntoreturn;
- QueryResult* msgdata;
- try {
- msgdata = getMore(ns, ntoreturn, cursorid);
- }
- catch( AssertionException& e ) {
- ss << " exception " + e.toString();
- msgdata = emptyMoreResult(cursorid);
- }
- Message *resp = new Message();
- resp->setData(msgdata, true);
- ss << " bytes:" << resp->data->dataLen();
- ss << " nreturned:" << msgdata->nReturned;
- dbresponse.response = resp;
- dbresponse.responseTo = m.data->id;
- //dbMsgPort.reply(m, resp);
+ DbMessage d(m);
+ const char *ns = d.getns();
+ ss << ns;
+ setClient(ns);
+ int ntoreturn = d.pullInt();
+ long long cursorid = d.pullInt64();
+ ss << " cid:" << cursorid;
+ ss << " ntoreturn:" << ntoreturn;
+ QueryResult* msgdata;
+ try {
+ msgdata = getMore(ns, ntoreturn, cursorid);
+ }
+ catch ( AssertionException& e ) {
+ ss << " exception " + e.toString();
+ msgdata = emptyMoreResult(cursorid);
+ }
+ Message *resp = new Message();
+ resp->setData(msgdata, true);
+ ss << " bytes:" << resp->data->dataLen();
+ ss << " nreturned:" << msgdata->nReturned;
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.data->id;
+ //dbMsgPort.reply(m, resp);
}
void receivedInsert(Message& m, stringstream& ss) {
- DbMessage d(m);
- while( d.moreJSObjs() ) {
- BSONObj js = d.nextJsObj();
- const char *ns = d.getns();
- assert(*ns);
- setClient(ns);
- ss << ns;
-
- if( objcheck && !js.valid() ) {
+ DbMessage d(m);
+ while ( d.moreJSObjs() ) {
+ BSONObj js = d.nextJsObj();
+ const char *ns = d.getns();
+ assert(*ns);
+ setClient(ns);
+ ss << ns;
+
+ if ( objcheck && !js.valid() ) {
problem() << "insert error ns: " << ns << '\n';
uassert("insert: bad object from client", false);
}
- theDataFileMgr.insert(ns, (void*) js.objdata(), js.objsize());
- logOp("i", ns, js);
- }
+ theDataFileMgr.insert(ns, (void*) js.objdata(), js.objsize());
+ logOp("i", ns, js);
+ }
}
extern int callDepth;
-class JniMessagingPort : public AbstractMessagingPort {
+class JniMessagingPort : public AbstractMessagingPort {
public:
- JniMessagingPort(Message& _container) : container(_container) { }
- void reply(Message& received, Message& response, MSGID) {
- container = response;
- }
- void reply(Message& received, Message& response) {
- container = response;
- }
- Message & container;
+ JniMessagingPort(Message& _container) : container(_container) { }
+ void reply(Message& received, Message& response, MSGID) {
+ container = response;
+ }
+ void reply(Message& received, Message& response) {
+ container = response;
+ }
+ Message & container;
};
-/* a call from java/js to the database locally.
+/* a call from java/js to the database locally.
m - inbound message
out - outbound message, if there is any, will be set here.
@@ -385,108 +389,108 @@ public:
*/
void jniCallback(Message& m, Message& out)
{
- Database *clientOld = database;
-
- JniMessagingPort jmp(out);
- callDepth++;
- int curOpOld = curOp;
-
- try {
-
- stringstream ss;
- char buf[64];
- time_t_to_String(time(0), buf);
- buf[20] = 0; // don't want the year
- ss << buf << " dbjs ";
-
- {
- Timer t;
-
- bool log = false;
- curOp = m.data->operation();
-
- if( m.data->operation() == dbQuery ) {
- // on a query, the Message must have m.freeIt true so that the buffer data can be
- // retained by cursors. As freeIt is false, we make a copy here.
- assert( m.data->len > 0 && m.data->len < 32000000 );
- Message copy(malloc(m.data->len), true);
- memcpy(copy.data, m.data, m.data->len);
- DbResponse dbr;
- receivedQuery(dbr, copy, ss, false);
- jmp.reply(m, *dbr.response, dbr.responseTo);
- }
- else if( m.data->operation() == dbInsert ) {
- ss << "insert ";
- receivedInsert(m, ss);
- }
- else if( m.data->operation() == dbUpdate ) {
- ss << "update ";
- receivedUpdate(m, ss);
- }
- else if( m.data->operation() == dbDelete ) {
- ss << "remove ";
- receivedDelete(m);
- }
- else if( m.data->operation() == dbGetMore ) {
- DEV log = true;
- ss << "getmore ";
- DbResponse dbr;
- receivedGetMore(dbr, m, ss);
- jmp.reply(m, *dbr.response, dbr.responseTo);
- }
- else if( m.data->operation() == dbKillCursors ) {
- try {
- log = true;
- ss << "killcursors ";
- receivedKillCursors(m);
- }
- catch( AssertionException& ) {
- problem() << "Caught Assertion in kill cursors, continuing" << endl;
- ss << " exception ";
- }
- }
- else {
- cout << " jnicall: operation isn't supported: " << m.data->operation() << endl;
- assert(false);
- }
-
- int ms = t.millis();
- log = log || ctr++ % 128 == 0;
- if( log || ms > 100 ) {
- ss << ' ' << t.millis() << "ms";
- cout << ss.str().c_str() << endl;
- }
- if( database && database->profile >= 1 ) {
- if( database->profile >= 2 || ms >= 100 ) {
- // profile it
- profile(ss.str().c_str()+20/*skip ts*/, ms);
- }
- }
- }
-
- }
- catch( AssertionException& ) {
- problem() << "Caught AssertionException in jniCall()" << endl;
- }
-
- curOp = curOpOld;
- callDepth--;
-
- if( database != clientOld ) {
- database = clientOld;
- wassert(false);
- }
+ Database *clientOld = database;
+
+ JniMessagingPort jmp(out);
+ callDepth++;
+ int curOpOld = curOp;
+
+ try {
+
+ stringstream ss;
+ char buf[64];
+ time_t_to_String(time(0), buf);
+ buf[20] = 0; // don't want the year
+ ss << buf << " dbjs ";
+
+ {
+ Timer t;
+
+ bool log = false;
+ curOp = m.data->operation();
+
+ if ( m.data->operation() == dbQuery ) {
+ // on a query, the Message must have m.freeIt true so that the buffer data can be
+ // retained by cursors. As freeIt is false, we make a copy here.
+ assert( m.data->len > 0 && m.data->len < 32000000 );
+ Message copy(malloc(m.data->len), true);
+ memcpy(copy.data, m.data, m.data->len);
+ DbResponse dbr;
+ receivedQuery(dbr, copy, ss, false);
+ jmp.reply(m, *dbr.response, dbr.responseTo);
+ }
+ else if ( m.data->operation() == dbInsert ) {
+ ss << "insert ";
+ receivedInsert(m, ss);
+ }
+ else if ( m.data->operation() == dbUpdate ) {
+ ss << "update ";
+ receivedUpdate(m, ss);
+ }
+ else if ( m.data->operation() == dbDelete ) {
+ ss << "remove ";
+ receivedDelete(m);
+ }
+ else if ( m.data->operation() == dbGetMore ) {
+ DEV log = true;
+ ss << "getmore ";
+ DbResponse dbr;
+ receivedGetMore(dbr, m, ss);
+ jmp.reply(m, *dbr.response, dbr.responseTo);
+ }
+ else if ( m.data->operation() == dbKillCursors ) {
+ try {
+ log = true;
+ ss << "killcursors ";
+ receivedKillCursors(m);
+ }
+ catch ( AssertionException& ) {
+ problem() << "Caught Assertion in kill cursors, continuing" << endl;
+ ss << " exception ";
+ }
+ }
+ else {
+ cout << " jnicall: operation isn't supported: " << m.data->operation() << endl;
+ assert(false);
+ }
+
+ int ms = t.millis();
+ log = log || ctr++ % 128 == 0;
+ if ( log || ms > 100 ) {
+ ss << ' ' << t.millis() << "ms";
+ cout << ss.str().c_str() << endl;
+ }
+ if ( database && database->profile >= 1 ) {
+ if ( database->profile >= 2 || ms >= 100 ) {
+ // profile it
+ profile(ss.str().c_str()+20/*skip ts*/, ms);
+ }
+ }
+ }
+
+ }
+ catch ( AssertionException& ) {
+ problem() << "Caught AssertionException in jniCall()" << endl;
+ }
+
+ curOp = curOpOld;
+ callDepth--;
+
+ if ( database != clientOld ) {
+ database = clientOld;
+ wassert(false);
+ }
}
#undef exit
-void dbexit(int rc, const char *why) {
- log() << " dbexit: " << why << "; flushing op log and files" << endl;
- flushOpLog();
+void dbexit(int rc, const char *why) {
+ log() << " dbexit: " << why << "; flushing op log and files" << endl;
+ flushOpLog();
- /* must do this before unmapping mem or you may get a seg fault */
- closeAllSockets();
+ /* must do this before unmapping mem or you may get a seg fault */
+ closeAllSockets();
- MemoryMappedFile::closeAllFiles();
- log() << " dbexit: really exiting now" << endl;
- exit(rc);
+ MemoryMappedFile::closeAllFiles();
+ log() << " dbexit: really exiting now" << endl;
+ exit(rc);
}
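
assembleResponse() is the single dispatch point for a connection: it takes the db lock, switches on m.data->operation(), and hands any reply back through DbResponse rather than writing to the socket while locked. A sketch of the kind of per-connection loop that would drive it (the recv/reply calls on MessagingPort are assumptions about the transport layer, not taken from this file):

    void connectionLoop(MessagingPort& p) {       // hypothetical driver
        while ( true ) {
            Message m;
            if ( !p.recv(m) )                     // assumed: false when the client disconnects
                break;
            DbResponse dbresponse;
            if ( !assembleResponse(m, dbresponse) )
                break;                            // false means the client sent "end"
            if ( dbresponse.response )
                p.reply(m, *dbresponse.response, dbresponse.responseTo);
        }
    }
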
diff --git a/db/instance.h b/db/instance.h
index a0ea1586382..2f988d504d2 100644
--- a/db/instance.h
+++ b/db/instance.h
@@ -3,16 +3,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -20,9 +20,9 @@
#pragma once
// turn on or off the oplog.* files which the db can generate.
-// these files are for diagnostic purposes and are unrelated to
+// these files are for diagnostic purposes and are unrelated to
// local.oplog.$main used by replication.
-//
+//
#define OPLOG if( 0 )
int getOpLogging();
@@ -31,46 +31,50 @@ int getOpLogging();
#define OPREAD if( getOpLogging() & 2 ) _oplog.readop((char *) m.data, m.data->len);
struct OpLog {
- ofstream *f;
- OpLog() : f(0) { }
- void init() {
+ ofstream *f;
+ OpLog() : f(0) { }
+ void init() {
OPLOG {
- stringstream ss;
- ss << "oplog." << hex << time(0);
- string name = ss.str();
- f = new ofstream(name.c_str(), ios::out | ios::binary);
- if ( ! f->good() ){
- problem() << "couldn't open log stream" << endl;
- throw 1717;
- }
+ stringstream ss;
+ ss << "oplog." << hex << time(0);
+ string name = ss.str();
+ f = new ofstream(name.c_str(), ios::out | ios::binary);
+ if ( ! f->good() ) {
+ problem() << "couldn't open log stream" << endl;
+ throw 1717;
+ }
}
- }
- void flush() {
+ }
+ void flush() {
OPLOG f->flush();
}
- void write(char *data,int len) {
+ void write(char *data,int len) {
OPLOG f->write(data,len);
}
- void readop(char *data, int len) {
+ void readop(char *data, int len) {
OPLOG {
- bool log = (getOpLogging() & 4) == 0;
- OCCASIONALLY log = true;
- if( log )
- f->write(data,len);
+ bool log = (getOpLogging() & 4) == 0;
+ OCCASIONALLY log = true;
+ if ( log )
+ f->write(data,len);
}
- }
+ }
};
-/* we defer response until we unlock. don't want a blocked socket to
+/* we defer response until we unlock. don't want a blocked socket to
keep things locked.
*/
-struct DbResponse {
- Message *response;
- MSGID responseTo;
- DbResponse(Message *r, MSGID rt) : response(r), responseTo(rt) {
- }
- DbResponse() { response = 0; }
- ~DbResponse() { delete response; }
+struct DbResponse {
+ Message *response;
+ MSGID responseTo;
+ DbResponse(Message *r, MSGID rt) : response(r), responseTo(rt) {
+ }
+ DbResponse() {
+ response = 0;
+ }
+ ~DbResponse() {
+ delete response;
+ }
};
bool assembleResponse( Message &m, DbResponse &dbresponse );
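
The OPLOG macro above is what keeps the diagnostic oplog.* machinery free when it is off: because it expands to `if( 0 )`, every `OPLOG { ... }` or `OPLOG f->flush();` inside OpLog compiles to a dead branch. Illustrative expansion of OpLog::flush() under the default definition:

    // #define OPLOG if( 0 )
    void flush() {
        if ( 0 ) f->flush();    // never runs; changing the macro to if( 1 ) re-enables the files
    }
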
diff --git a/db/introspect.cpp b/db/introspect.cpp
index bda75df05fd..cd2394c3242 100644
--- a/db/introspect.cpp
+++ b/db/introspect.cpp
@@ -1,17 +1,17 @@
-// introspect.cpp
+// introspect.cpp
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -28,28 +28,28 @@ typedef map<string,Cursor*> StringToCursor;
StringToCursor *specialNamespaces;
auto_ptr<Cursor> getSpecialCursor(const char *ns) {
- StringToCursor::iterator it = specialNamespaces->find(ns);
- return auto_ptr<Cursor>
- (it == specialNamespaces->end() ?
- 0 : it->second->clone());
+ StringToCursor::iterator it = specialNamespaces->find(ns);
+ return auto_ptr<Cursor>
+ (it == specialNamespaces->end() ?
+ 0 : it->second->clone());
}
void SingleResultObjCursor::reg(const char *as) {
- if( specialNamespaces == 0 )
- specialNamespaces = new StringToCursor();
- if( specialNamespaces->count(as) == 0 ) {
- (*specialNamespaces)[as] = this;
- }
+ if ( specialNamespaces == 0 )
+ specialNamespaces = new StringToCursor();
+ if ( specialNamespaces->count(as) == 0 ) {
+ (*specialNamespaces)[as] = this;
+ }
}
void profile(const char *str,
- int millis)
+ int millis)
{
- BSONObjBuilder b;
- b.appendDate("ts", jsTime());
- b.append("info", str);
- b.append("millis", (double) millis);
- BSONObj p = b.done();
- theDataFileMgr.insert(database->profileName.c_str(),
- p.objdata(), p.objsize(), true);
+ BSONObjBuilder b;
+ b.appendDate("ts", jsTime());
+ b.append("info", str);
+ b.append("millis", (double) millis);
+ BSONObj p = b.done();
+ theDataFileMgr.insert(database->profileName.c_str(),
+ p.objdata(), p.objsize(), true);
}
diff --git a/db/introspect.h b/db/introspect.h
index 01cbbfce184..71da3cefbb3 100644
--- a/db/introspect.h
+++ b/db/introspect.h
@@ -3,16 +3,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -26,36 +26,48 @@
auto_ptr<Cursor> getSpecialCursor(const char *ns);
class SingleResultObjCursor : public Cursor {
- int i;
+ int i;
protected:
- BSONObjBuilder b;
- void reg(const char *as); /* register as a certain namespace */
+ BSONObjBuilder b;
+ void reg(const char *as); /* register as a certain namespace */
public:
- SingleResultObjCursor() { i = 0; }
- virtual bool ok() { return i == 0; }
- virtual Record* _current() { assert(false); return 0; }
- virtual DiskLoc currLoc() { assert(false); return DiskLoc(); }
+ SingleResultObjCursor() {
+ i = 0;
+ }
+ virtual bool ok() {
+ return i == 0;
+ }
+ virtual Record* _current() {
+ assert(false);
+ return 0;
+ }
+ virtual DiskLoc currLoc() {
+ assert(false);
+ return DiskLoc();
+ }
- virtual void fill() = 0;
+ virtual void fill() = 0;
- virtual BSONObj current() {
- assert(i == 0);
- fill();
- return b.done();
- }
+ virtual BSONObj current() {
+ assert(i == 0);
+ fill();
+ return b.done();
+ }
- virtual bool advance() {
- i++;
- return false;
- }
+ virtual bool advance() {
+ i++;
+ return false;
+ }
- virtual string toString() { return "SingleResultObjCursor"; }
+ virtual string toString() {
+ return "SingleResultObjCursor";
+ }
};
-/* --- profiling --------------------------------------------
+/* --- profiling --------------------------------------------
do when database->profile is set
*/
void profile(const char *str,
- int millis);
+ int millis);
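
SingleResultObjCursor is the hook behind getSpecialCursor(): a subclass builds exactly one BSON object in fill(), registers itself under a namespace with reg(), and queries against that namespace are then answered from a clone of the registered instance. A hypothetical subclass (the namespace and field names are invented; only the base-class API comes from this header):

    class BuildInfoCursor : public SingleResultObjCursor {
    public:
        BuildInfoCursor() {
            reg("local.temp.buildinfo");      // register this instance for the namespace
        }
        virtual void fill() {
            b.append("version", "0.1.0");     // b is the protected BSONObjBuilder
            b.append("bits", 64.0);
        }
    } buildInfoCursorInstance;                // file-scope instance: reg() runs at startup
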
diff --git a/db/javajs.cpp b/db/javajs.cpp
index f8d31766ab0..ba40366c51e 100644
--- a/db/javajs.cpp
+++ b/db/javajs.cpp
@@ -2,15 +2,15 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -21,7 +21,7 @@
#include <map>
#include <list>
-using namespace boost::filesystem;
+using namespace boost::filesystem;
//#define JNI_DEBUG 1
@@ -30,7 +30,7 @@ using namespace boost::filesystem;
#define JNI_DEBUG(x) cerr << x << endl
#else
#undef JNI_DEBUG
-#define JNI_DEBUG(x)
+#define JNI_DEBUG(x)
#endif
@@ -45,7 +45,7 @@ using namespace boost::filesystem;
using namespace std;
#if defined(_WIN32)
-/* [dm] this being undefined without us adding it here means there is
+/* [dm] this being undefined without us adding it here means there is
no tss cleanup on windows for boost lib?
we don't care for now esp on windows only
@@ -61,10 +61,10 @@ using namespace std;
If Boost.Threads later implements automatic tss cleanup in cases
where it currently doesn't (which is the plan), the duplicate
symbol error will warn the user that their custom solution is no
- longer needed and can be removed.
+ longer needed and can be removed.
*/
extern "C" void tss_cleanup_implemented(void) {
- //cout << "tss_cleanup_implemented called" << endl;
+ //cout << "tss_cleanup_implemented called" << endl;
}
#endif
@@ -72,12 +72,12 @@ JavaJSImpl * JavaJS = 0;
#if !defined(NOJNI)
-void myJNIClean( JNIEnv * env ){
- JavaJS->detach( env );
+void myJNIClean( JNIEnv * env ) {
+ JavaJS->detach( env );
}
JavaJSImpl::JavaJSImpl() {
- JavaJSImpl(null);
+ JavaJSImpl(null);
}
#if defined(_WIN32)
@@ -87,370 +87,370 @@ const char SYSTEM_COLON = ':';
#endif
-void _addClassPath( const char * ed , stringstream & ss , const char * subdir ){
- path includeDir(ed);
- includeDir /= subdir;
- directory_iterator end;
- try {
- directory_iterator i(includeDir);
- while( i != end ) {
- path p = *i;
- ss << SYSTEM_COLON << p.string();
- i++;
- }
- }
- catch(...) {
- problem() << "exception looking for ed class path includeDir: " << includeDir.string() << endl;
- sleepsecs(3);
- dbexit(116);
- }
+void _addClassPath( const char * ed , stringstream & ss , const char * subdir ) {
+ path includeDir(ed);
+ includeDir /= subdir;
+ directory_iterator end;
+ try {
+ directory_iterator i(includeDir);
+ while ( i != end ) {
+ path p = *i;
+ ss << SYSTEM_COLON << p.string();
+ i++;
+ }
+ }
+ catch (...) {
+ problem() << "exception looking for ed class path includeDir: " << includeDir.string() << endl;
+ sleepsecs(3);
+ dbexit(116);
+ }
}
-JavaJSImpl::JavaJSImpl(const char *appserverPath){
- _jvm = 0;
- _mainEnv = 0;
- _dbhook = 0;
+JavaJSImpl::JavaJSImpl(const char *appserverPath) {
+ _jvm = 0;
+ _mainEnv = 0;
+ _dbhook = 0;
- const char * ed = findEd(appserverPath);
- stringstream ss;
+ const char * ed = findEd(appserverPath);
+ stringstream ss;
- ss << "-Djava.class.path=.";
- ss << SYSTEM_COLON << ed << "/build/";
+ ss << "-Djava.class.path=.";
+ ss << SYSTEM_COLON << ed << "/build/";
- _addClassPath( ed , ss , "include" );
- _addClassPath( ed , ss , "include/jython/" );
- _addClassPath( ed , ss , "include/jython/javalib" );
+ _addClassPath( ed , ss , "include" );
+ _addClassPath( ed , ss , "include/jython/" );
+ _addClassPath( ed , ss , "include/jython/javalib" );
#if defined(_WIN32)
- ss << SYSTEM_COLON << "C:\\Program Files\\Java\\jdk\\lib\\tools.jar";
+ ss << SYSTEM_COLON << "C:\\Program Files\\Java\\jdk\\lib\\tools.jar";
#else
- ss << SYSTEM_COLON << "/opt/java/lib/tools.jar";
+ ss << SYSTEM_COLON << "/opt/java/lib/tools.jar";
#endif
- if( getenv( "CLASSPATH" ) )
- ss << SYSTEM_COLON << getenv( "CLASSPATH" );
+ if ( getenv( "CLASSPATH" ) )
+ ss << SYSTEM_COLON << getenv( "CLASSPATH" );
- string s = ss.str();
- char * p = (char *)malloc( s.size() * 4 );
- strcpy( p , s.c_str() );
- char *q = p;
+ string s = ss.str();
+ char * p = (char *)malloc( s.size() * 4 );
+ strcpy( p , s.c_str() );
+ char *q = p;
#if defined(_WIN32)
- while( *p ) {
- if( *p == '/' ) *p = '\\';
- p++;
- }
+ while ( *p ) {
+ if ( *p == '/' ) *p = '\\';
+ p++;
+ }
#endif
- JavaVMOption * options = new JavaVMOption[3];
- options[0].optionString = q;
- options[1].optionString = (char*)"-Djava.awt.headless=true";
- options[2].optionString = (char*)"-Xmx300m";
-// -Xcheck:jni
-
- _vmArgs = new JavaVMInitArgs();
- _vmArgs->version = JNI_VERSION_1_4;
- _vmArgs->options = options;
- _vmArgs->nOptions = 3;
- _vmArgs->ignoreUnrecognized = JNI_FALSE;
-
- log() << "Creating JVM" << endl;
- jint res = JNI_CreateJavaVM( &_jvm, (void**)&_mainEnv, _vmArgs );
-
- if( res ) {
- log() << "using classpath: " << q << endl;
- log()
- << " res : " << (unsigned) res << " "
- << "_jvm : " << _jvm << " "
- << "_env : " << _mainEnv << " "
- << endl;
- }
-
- if( res ) {
- problem() << "Couldn't create JVM res:" << (int) res << " terminating" << endl;
- log() << "(try --nojni if you do not require that functionality)" << endl;
- exit(22);
- }
- jassert( res == 0 );
- jassert( _jvm > 0 );
- jassert( _mainEnv > 0 );
-
- _envs = new boost::thread_specific_ptr<JNIEnv>( myJNIClean );
- assert( ! _envs->get() );
- _envs->reset( _mainEnv );
-
- _dbhook = findClass( "ed/db/JSHook" );
- if( _dbhook == 0 )
- log() << "using classpath: " << q << endl;
- jassert( _dbhook );
-
- {
- jmethodID init = _mainEnv->GetStaticMethodID( _dbhook , "init" , "(Ljava/lang/String;)V" );
- jassert( init );
- _mainEnv->CallStaticVoidMethod( _dbhook , init , _getEnv()->NewStringUTF( ed ) );
- }
-
- _dbjni = findClass( "ed/db/DBJni" );
- jassert( _dbjni );
-
- _scopeCreate = _mainEnv->GetStaticMethodID( _dbhook , "scopeCreate" , "()J" );
- _scopeInit = _mainEnv->GetStaticMethodID( _dbhook , "scopeInit" , "(JLjava/nio/ByteBuffer;)Z" );
- _scopeSetThis = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetThis" , "(JLjava/nio/ByteBuffer;)Z" );
- _scopeReset = _mainEnv->GetStaticMethodID( _dbhook , "scopeReset" , "(J)Z" );
- _scopeFree = _mainEnv->GetStaticMethodID( _dbhook , "scopeFree" , "(J)V" );
-
- _scopeGetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetNumber" , "(JLjava/lang/String;)D" );
- _scopeGetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetString" , "(JLjava/lang/String;)Ljava/lang/String;" );
- _scopeGetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetBoolean" , "(JLjava/lang/String;)Z" );
- _scopeGetType = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetType" , "(JLjava/lang/String;)B" );
- _scopeGetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)I" );
- _scopeGuessObjectSize = _mainEnv->GetStaticMethodID( _dbhook , "scopeGuessObjectSize" , "(JLjava/lang/String;)J" );
-
- _scopeSetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetNumber" , "(JLjava/lang/String;D)Z" );
- _scopeSetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetBoolean" , "(JLjava/lang/String;Z)Z" );
- _scopeSetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetString" , "(JLjava/lang/String;Ljava/lang/String;)Z" );
- _scopeSetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)Z" );
-
- _functionCreate = _mainEnv->GetStaticMethodID( _dbhook , "functionCreate" , "(Ljava/lang/String;)J" );
- _invoke = _mainEnv->GetStaticMethodID( _dbhook , "invoke" , "(JJ)I" );
-
- jassert( _scopeCreate );
- jassert( _scopeInit );
- jassert( _scopeSetThis );
- jassert( _scopeReset );
- jassert( _scopeFree );
-
- jassert( _scopeGetNumber );
- jassert( _scopeGetString );
- jassert( _scopeGetObject );
- jassert( _scopeGetBoolean );
- jassert( _scopeGetType );
- jassert( _scopeGuessObjectSize );
-
- jassert( _scopeSetNumber );
- jassert( _scopeSetBoolean );
- jassert( _scopeSetString );
- jassert( _scopeSetObject );
-
- jassert( _functionCreate );
- jassert( _invoke );
-
- JNINativeMethod * nativeSay = new JNINativeMethod();
- nativeSay->name = (char*)"native_say";
- nativeSay->signature = (char*)"(Ljava/nio/ByteBuffer;)V";
- nativeSay->fnPtr = (void*)java_native_say;
- _mainEnv->RegisterNatives( _dbjni , nativeSay , 1 );
-
-
- JNINativeMethod * nativeCall = new JNINativeMethod();
- nativeCall->name = (char*)"native_call";
- nativeCall->signature = (char*)"(Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;)I";
- nativeCall->fnPtr = (void*)java_native_call;
- _mainEnv->RegisterNatives( _dbjni , nativeCall , 1 );
+ JavaVMOption * options = new JavaVMOption[3];
+ options[0].optionString = q;
+ options[1].optionString = (char*)"-Djava.awt.headless=true";
+ options[2].optionString = (char*)"-Xmx300m";
+// -Xcheck:jni
+
+ _vmArgs = new JavaVMInitArgs();
+ _vmArgs->version = JNI_VERSION_1_4;
+ _vmArgs->options = options;
+ _vmArgs->nOptions = 3;
+ _vmArgs->ignoreUnrecognized = JNI_FALSE;
+
+ log() << "Creating JVM" << endl;
+ jint res = JNI_CreateJavaVM( &_jvm, (void**)&_mainEnv, _vmArgs );
+
+ if ( res ) {
+ log() << "using classpath: " << q << endl;
+ log()
+ << " res : " << (unsigned) res << " "
+ << "_jvm : " << _jvm << " "
+ << "_env : " << _mainEnv << " "
+ << endl;
+ }
+
+ if ( res ) {
+ problem() << "Couldn't create JVM res:" << (int) res << " terminating" << endl;
+ log() << "(try --nojni if you do not require that functionality)" << endl;
+ exit(22);
+ }
+ jassert( res == 0 );
+ jassert( _jvm > 0 );
+ jassert( _mainEnv > 0 );
+
+ _envs = new boost::thread_specific_ptr<JNIEnv>( myJNIClean );
+ assert( ! _envs->get() );
+ _envs->reset( _mainEnv );
+
+ _dbhook = findClass( "ed/db/JSHook" );
+ if ( _dbhook == 0 )
+ log() << "using classpath: " << q << endl;
+ jassert( _dbhook );
+
+ {
+ jmethodID init = _mainEnv->GetStaticMethodID( _dbhook , "init" , "(Ljava/lang/String;)V" );
+ jassert( init );
+ _mainEnv->CallStaticVoidMethod( _dbhook , init , _getEnv()->NewStringUTF( ed ) );
+ }
+
+ _dbjni = findClass( "ed/db/DBJni" );
+ jassert( _dbjni );
+
+ _scopeCreate = _mainEnv->GetStaticMethodID( _dbhook , "scopeCreate" , "()J" );
+ _scopeInit = _mainEnv->GetStaticMethodID( _dbhook , "scopeInit" , "(JLjava/nio/ByteBuffer;)Z" );
+ _scopeSetThis = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetThis" , "(JLjava/nio/ByteBuffer;)Z" );
+ _scopeReset = _mainEnv->GetStaticMethodID( _dbhook , "scopeReset" , "(J)Z" );
+ _scopeFree = _mainEnv->GetStaticMethodID( _dbhook , "scopeFree" , "(J)V" );
+
+ _scopeGetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetNumber" , "(JLjava/lang/String;)D" );
+ _scopeGetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetString" , "(JLjava/lang/String;)Ljava/lang/String;" );
+ _scopeGetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetBoolean" , "(JLjava/lang/String;)Z" );
+ _scopeGetType = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetType" , "(JLjava/lang/String;)B" );
+ _scopeGetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)I" );
+ _scopeGuessObjectSize = _mainEnv->GetStaticMethodID( _dbhook , "scopeGuessObjectSize" , "(JLjava/lang/String;)J" );
+
+ _scopeSetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetNumber" , "(JLjava/lang/String;D)Z" );
+ _scopeSetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetBoolean" , "(JLjava/lang/String;Z)Z" );
+ _scopeSetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetString" , "(JLjava/lang/String;Ljava/lang/String;)Z" );
+ _scopeSetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)Z" );
+
+ _functionCreate = _mainEnv->GetStaticMethodID( _dbhook , "functionCreate" , "(Ljava/lang/String;)J" );
+ _invoke = _mainEnv->GetStaticMethodID( _dbhook , "invoke" , "(JJ)I" );
+
+ jassert( _scopeCreate );
+ jassert( _scopeInit );
+ jassert( _scopeSetThis );
+ jassert( _scopeReset );
+ jassert( _scopeFree );
+
+ jassert( _scopeGetNumber );
+ jassert( _scopeGetString );
+ jassert( _scopeGetObject );
+ jassert( _scopeGetBoolean );
+ jassert( _scopeGetType );
+ jassert( _scopeGuessObjectSize );
+
+ jassert( _scopeSetNumber );
+ jassert( _scopeSetBoolean );
+ jassert( _scopeSetString );
+ jassert( _scopeSetObject );
+
+ jassert( _functionCreate );
+ jassert( _invoke );
+
+ JNINativeMethod * nativeSay = new JNINativeMethod();
+ nativeSay->name = (char*)"native_say";
+ nativeSay->signature = (char*)"(Ljava/nio/ByteBuffer;)V";
+ nativeSay->fnPtr = (void*)java_native_say;
+ _mainEnv->RegisterNatives( _dbjni , nativeSay , 1 );
+
+
+ JNINativeMethod * nativeCall = new JNINativeMethod();
+ nativeCall->name = (char*)"native_call";
+ nativeCall->signature = (char*)"(Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;)I";
+ nativeCall->fnPtr = (void*)java_native_call;
+ _mainEnv->RegisterNatives( _dbjni , nativeCall , 1 );
}
-JavaJSImpl::~JavaJSImpl(){
- if ( _jvm ){
- _jvm->DestroyJavaVM();
- cerr << "Destroying JVM" << endl;
- }
+JavaJSImpl::~JavaJSImpl() {
+ if ( _jvm ) {
+ _jvm->DestroyJavaVM();
+ cerr << "Destroying JVM" << endl;
+ }
}
// scope
-jlong JavaJSImpl::scopeCreate(){
- return _getEnv()->CallStaticLongMethod( _dbhook , _scopeCreate );
+jlong JavaJSImpl::scopeCreate() {
+ return _getEnv()->CallStaticLongMethod( _dbhook , _scopeCreate );
}
-jboolean JavaJSImpl::scopeReset( jlong id ){
- return _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeReset );
+jboolean JavaJSImpl::scopeReset( jlong id ) {
+ return _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeReset );
}
-void JavaJSImpl::scopeFree( jlong id ){
- _getEnv()->CallStaticVoidMethod( _dbhook , _scopeFree , id );
+void JavaJSImpl::scopeFree( jlong id ) {
+ _getEnv()->CallStaticVoidMethod( _dbhook , _scopeFree , id );
}
// scope setters
int JavaJSImpl::scopeSetBoolean( jlong id , const char * field , jboolean val ) {
- jstring fieldString = _getEnv()->NewStringUTF( field );
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetNumber , id , fieldString , val );
- _getEnv()->DeleteLocalRef( fieldString );
- return res;
+ jstring fieldString = _getEnv()->NewStringUTF( field );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetNumber , id , fieldString , val );
+ _getEnv()->DeleteLocalRef( fieldString );
+ return res;
}
-int JavaJSImpl::scopeSetNumber( jlong id , const char * field , double val ){
- jstring fieldString = _getEnv()->NewStringUTF( field );
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetNumber , id , fieldString , val );
- _getEnv()->DeleteLocalRef( fieldString );
- return res;
+int JavaJSImpl::scopeSetNumber( jlong id , const char * field , double val ) {
+ jstring fieldString = _getEnv()->NewStringUTF( field );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetNumber , id , fieldString , val );
+ _getEnv()->DeleteLocalRef( fieldString );
+ return res;
}
-int JavaJSImpl::scopeSetString( jlong id , const char * field , const char * val ){
- jstring s1 = _getEnv()->NewStringUTF( field );
- jstring s2 = _getEnv()->NewStringUTF( val );
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetString , id , s1 , s2 );
- _getEnv()->DeleteLocalRef( s1 );
- _getEnv()->DeleteLocalRef( s2 );
- return res;
+int JavaJSImpl::scopeSetString( jlong id , const char * field , const char * val ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ jstring s2 = _getEnv()->NewStringUTF( val );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetString , id , s1 , s2 );
+ _getEnv()->DeleteLocalRef( s1 );
+ _getEnv()->DeleteLocalRef( s2 );
+ return res;
}
-int JavaJSImpl::scopeSetObject( jlong id , const char * field , BSONObj * obj ){
- jobject bb = 0;
- if ( obj ){
- bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
- jassert( bb );
- }
-
- jstring s1 = _getEnv()->NewStringUTF( field );
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetObject , id , s1 , bb );
- _getEnv()->DeleteLocalRef( s1 );
- if ( bb )
- _getEnv()->DeleteLocalRef( bb );
-
- return res;
+int JavaJSImpl::scopeSetObject( jlong id , const char * field , BSONObj * obj ) {
+ jobject bb = 0;
+ if ( obj ) {
+ bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
+ jassert( bb );
+ }
+
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetObject , id , s1 , bb );
+ _getEnv()->DeleteLocalRef( s1 );
+ if ( bb )
+ _getEnv()->DeleteLocalRef( bb );
+
+ return res;
}
-int JavaJSImpl::scopeInit( jlong id , BSONObj * obj ){
- if ( ! obj )
- return 0;
+int JavaJSImpl::scopeInit( jlong id , BSONObj * obj ) {
+ if ( ! obj )
+ return 0;
- jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
- jassert( bb );
-
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeInit , id , bb );
- _getEnv()->DeleteLocalRef( bb );
- return res;
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
+ jassert( bb );
+
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeInit , id , bb );
+ _getEnv()->DeleteLocalRef( bb );
+ return res;
}
-int JavaJSImpl::scopeSetThis( jlong id , BSONObj * obj ){
- if ( ! obj )
- return 0;
-
- jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
- jassert( bb );
+int JavaJSImpl::scopeSetThis( jlong id , BSONObj * obj ) {
+ if ( ! obj )
+ return 0;
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetThis , id , bb );
- _getEnv()->DeleteLocalRef( bb );
- return res;
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
+ jassert( bb );
+
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetThis , id , bb );
+ _getEnv()->DeleteLocalRef( bb );
+ return res;
}
// scope getters
-char JavaJSImpl::scopeGetType( jlong id , const char * field ){
- jstring s1 = _getEnv()->NewStringUTF( field );
- int res =_getEnv()->CallStaticByteMethod( _dbhook , _scopeGetType , id , s1 );
- _getEnv()->DeleteLocalRef( s1 );
- return res;
+char JavaJSImpl::scopeGetType( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ int res =_getEnv()->CallStaticByteMethod( _dbhook , _scopeGetType , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+ return res;
}
-double JavaJSImpl::scopeGetNumber( jlong id , const char * field ){
- jstring s1 = _getEnv()->NewStringUTF( field );
- double res = _getEnv()->CallStaticDoubleMethod( _dbhook , _scopeGetNumber , id , s1 );
- _getEnv()->DeleteLocalRef( s1 );
- return res;
+double JavaJSImpl::scopeGetNumber( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ double res = _getEnv()->CallStaticDoubleMethod( _dbhook , _scopeGetNumber , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+ return res;
}
-jboolean JavaJSImpl::scopeGetBoolean( jlong id , const char * field ){
- jstring s1 = _getEnv()->NewStringUTF( field );
- jboolean res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeGetBoolean , id , s1 );
- _getEnv()->DeleteLocalRef( s1 );
- return res;
+jboolean JavaJSImpl::scopeGetBoolean( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ jboolean res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeGetBoolean , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+ return res;
}
string JavaJSImpl::scopeGetString( jlong id , const char * field ) {
- jstring s1 = _getEnv()->NewStringUTF( field );
- jstring s = (jstring)_getEnv()->CallStaticObjectMethod( _dbhook , _scopeGetString , id , s1 );
- _getEnv()->DeleteLocalRef( s1 );
-
- if ( ! s )
- return "";
-
- const char * c = _getEnv()->GetStringUTFChars( s , 0 );
- string retStr(c);
- _getEnv()->ReleaseStringUTFChars( s , c );
- return retStr;
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ jstring s = (jstring)_getEnv()->CallStaticObjectMethod( _dbhook , _scopeGetString , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+
+ if ( ! s )
+ return "";
+
+ const char * c = _getEnv()->GetStringUTFChars( s , 0 );
+ string retStr(c);
+ _getEnv()->ReleaseStringUTFChars( s , c );
+ return retStr;
}
#ifdef J_USE_OBJ
-BSONObj JavaJSImpl::scopeGetObject( jlong id , const char * field )
+BSONObj JavaJSImpl::scopeGetObject( jlong id , const char * field )
{
- jstring s1 = _getEnv()->NewStringUTF( field );
- int guess = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGuessObjectSize , id , _getEnv()->NewStringUTF( field ) );
- _getEnv()->DeleteLocalRef( s1 );
-
- char * buf = (char *) malloc(guess);
- jobject bb = _getEnv()->NewDirectByteBuffer( (void*)buf , guess );
- jassert( bb );
-
- int len = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGetObject , id , _getEnv()->NewStringUTF( field ) , bb );
- _getEnv()->DeleteLocalRef( bb );
- //cout << "len : " << len << endl;
- jassert( len > 0 && len < guess );
-
- BSONObj obj(buf, true);
- assert( obj.objsize() <= guess );
- return obj;
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ int guess = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGuessObjectSize , id , _getEnv()->NewStringUTF( field ) );
+ _getEnv()->DeleteLocalRef( s1 );
+
+ char * buf = (char *) malloc(guess);
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)buf , guess );
+ jassert( bb );
+
+ int len = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGetObject , id , _getEnv()->NewStringUTF( field ) , bb );
+ _getEnv()->DeleteLocalRef( bb );
+ //cout << "len : " << len << endl;
+ jassert( len > 0 && len < guess );
+
+ BSONObj obj(buf, true);
+ assert( obj.objsize() <= guess );
+ return obj;
}
#endif
// other
-jlong JavaJSImpl::functionCreate( const char * code ){
- jstring s = _getEnv()->NewStringUTF( code );
- jassert( s );
- jlong id = _getEnv()->CallStaticLongMethod( _dbhook , _functionCreate , s );
- _getEnv()->DeleteLocalRef( s );
- return id;
+jlong JavaJSImpl::functionCreate( const char * code ) {
+ jstring s = _getEnv()->NewStringUTF( code );
+ jassert( s );
+ jlong id = _getEnv()->CallStaticLongMethod( _dbhook , _functionCreate , s );
+ _getEnv()->DeleteLocalRef( s );
+ return id;
}
-
-int JavaJSImpl::invoke( jlong scope , jlong function ){
- return _getEnv()->CallStaticIntMethod( _dbhook , _invoke , scope , function );
+
+int JavaJSImpl::invoke( jlong scope , jlong function ) {
+ return _getEnv()->CallStaticIntMethod( _dbhook , _invoke , scope , function );
}
// --- fun run method
-void JavaJSImpl::run( const char * js ){
- jclass c = findClass( "ed/js/JS" );
- jassert( c );
-
- jmethodID m = _getEnv()->GetStaticMethodID( c , "eval" , "(Ljava/lang/String;)Ljava/lang/Object;" );
- jassert( m );
-
- jstring s = _getEnv()->NewStringUTF( js );
- log() << _getEnv()->CallStaticObjectMethod( c , m , s ) << endl;
- _getEnv()->DeleteLocalRef( s );
+void JavaJSImpl::run( const char * js ) {
+ jclass c = findClass( "ed/js/JS" );
+ jassert( c );
+
+ jmethodID m = _getEnv()->GetStaticMethodID( c , "eval" , "(Ljava/lang/String;)Ljava/lang/Object;" );
+ jassert( m );
+
+ jstring s = _getEnv()->NewStringUTF( js );
+ log() << _getEnv()->CallStaticObjectMethod( c , m , s ) << endl;
+ _getEnv()->DeleteLocalRef( s );
}
-void JavaJSImpl::printException(){
- jthrowable exc = _getEnv()->ExceptionOccurred();
- if ( exc ){
- _getEnv()->ExceptionDescribe();
- _getEnv()->ExceptionClear();
- }
+void JavaJSImpl::printException() {
+ jthrowable exc = _getEnv()->ExceptionOccurred();
+ if ( exc ) {
+ _getEnv()->ExceptionDescribe();
+ _getEnv()->ExceptionClear();
+ }
}
-JNIEnv * JavaJSImpl::_getEnv(){
- JNIEnv * env = _envs->get();
- if ( env )
- return env;
+JNIEnv * JavaJSImpl::_getEnv() {
+ JNIEnv * env = _envs->get();
+ if ( env )
+ return env;
- int res = _jvm->AttachCurrentThread( (void**)&env , (void*)&_vmArgs );
- if( res ) {
- cout << "ERROR javajs attachcurrentthread fails res:" << res << '\n';
- assert(false);
- }
+ int res = _jvm->AttachCurrentThread( (void**)&env , (void*)&_vmArgs );
+ if ( res ) {
+ cout << "ERROR javajs attachcurrentthread fails res:" << res << '\n';
+ assert(false);
+ }
- _envs->reset( env );
- return env;
+ _envs->reset( env );
+ return env;
}
-void jasserted(const char *msg, const char *file, unsigned line) {
- log() << "jassert failed " << msg << " " << file << " " << line << endl;
- if ( JavaJS ) JavaJS->printException();
- throw AssertionException();
+void jasserted(const char *msg, const char *file, unsigned line) {
+ log() << "jassert failed " << msg << " " << file << " " << line << endl;
+ if ( JavaJS ) JavaJS->printException();
+ throw AssertionException();
}
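
Every scope setter and getter above follows the same JNI discipline: build a local jstring with NewStringUTF, pass it to the static hook method, then release it with DeleteLocalRef. A minimal RAII sketch of that pattern (the LocalUTF helper is hypothetical, for illustration only, and is not part of this codebase):

    class LocalUTF {                                   // hypothetical helper, not in the tree
        JNIEnv * _env;
        jstring _s;
    public:
        LocalUTF( JNIEnv * env , const char * c ) : _env( env ) , _s( env->NewStringUTF( c ) ) { }
        ~LocalUTF() {
            if ( _s ) _env->DeleteLocalRef( _s );      // local ref released automatically
        }
        operator jstring() const {
            return _s;
        }
    };

    // e.g. a getter could then read:
    //   LocalUTF f( _getEnv() , field );
    //   return _getEnv()->CallStaticDoubleMethod( _dbhook , _scopeGetNumber , id , (jstring)f );
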
@@ -458,201 +458,203 @@ const char* findEd(const char *path) {
#if defined(_WIN32)
- if (!path) {
- path = findEd();
- }
+ if (!path) {
+ path = findEd();
+ }
- // @TODO check validity
+ // @TODO check validity
- return path;
+ return path;
#else
- if (!path) {
- return findEd();
- }
-
- log() << "Appserver location specified : " << path << endl;
-
- if (!path) {
- log() << " invalid appserver location : " << path << " : terminating - prepare for bus error" << endl;
- return 0;
- }
-
- DIR *testDir = opendir(path);
-
- if (testDir) {
- log() << " found directory for appserver : " << path << endl;
- closedir(testDir);
- return path;
- }
- else {
- log() << " ERROR : not a directory for specified appserver location : " << path << " - prepare for bus error" << endl;
- return null;
- }
+ if (!path) {
+ return findEd();
+ }
+
+ log() << "Appserver location specified : " << path << endl;
+
+ if (!path) {
+ log() << " invalid appserver location : " << path << " : terminating - prepare for bus error" << endl;
+ return 0;
+ }
+
+ DIR *testDir = opendir(path);
+
+ if (testDir) {
+ log() << " found directory for appserver : " << path << endl;
+ closedir(testDir);
+ return path;
+ }
+ else {
+ log() << " ERROR : not a directory for specified appserver location : " << path << " - prepare for bus error" << endl;
+ return null;
+ }
#endif
}
-const char * findEd(){
+const char * findEd() {
#if defined(_WIN32)
- log() << "Appserver location will be WIN32 default : c:/l/ed/" << endl;
- return "c:/l/ed";
+ log() << "Appserver location will be WIN32 default : c:/l/ed/" << endl;
+ return "c:/l/ed";
#else
- static list<const char*> possibleEdDirs;
- if ( ! possibleEdDirs.size() ){
- possibleEdDirs.push_back( "../../ed/ed/" ); // this one for dwight dev box
- possibleEdDirs.push_back( "../ed/" );
- possibleEdDirs.push_back( "../../ed/" );
- possibleEdDirs.push_back( "../babble/" );
- possibleEdDirs.push_back( "../../babble/" );
- }
-
- for ( list<const char*>::iterator i = possibleEdDirs.begin() ; i != possibleEdDirs.end(); i++ ){
- const char * temp = *i;
- DIR * test = opendir( temp );
- if ( ! test )
- continue;
-
- closedir( test );
- log() << "Found directory for appserver : " << temp << endl;
- return temp;
- }
-
- problem() << "ERROR : can't find directory for appserver - terminating" << endl;
- exit(44);
- return 0;
+ static list<const char*> possibleEdDirs;
+ if ( ! possibleEdDirs.size() ) {
+ possibleEdDirs.push_back( "../../ed/ed/" ); // this one for dwight dev box
+ possibleEdDirs.push_back( "../ed/" );
+ possibleEdDirs.push_back( "../../ed/" );
+ possibleEdDirs.push_back( "../babble/" );
+ possibleEdDirs.push_back( "../../babble/" );
+ }
+
+ for ( list<const char*>::iterator i = possibleEdDirs.begin() ; i != possibleEdDirs.end(); i++ ) {
+ const char * temp = *i;
+ DIR * test = opendir( temp );
+ if ( ! test )
+ continue;
+
+ closedir( test );
+ log() << "Found directory for appserver : " << temp << endl;
+ return temp;
+ }
+
+ problem() << "ERROR : can't find directory for appserver - terminating" << endl;
+ exit(44);
+ return 0;
#endif
};
// ---
-JNIEXPORT void JNICALL java_native_say(JNIEnv * env , jclass, jobject outBuffer ){
- JNI_DEBUG( "native say called!" );
+JNIEXPORT void JNICALL java_native_say(JNIEnv * env , jclass, jobject outBuffer ) {
+ JNI_DEBUG( "native say called!" );
- Message out( env->GetDirectBufferAddress( outBuffer ) , false );
- Message in;
+ Message out( env->GetDirectBufferAddress( outBuffer ) , false );
+ Message in;
- jniCallback( out , in );
+ jniCallback( out , in );
}
-JNIEXPORT jint JNICALL java_native_call(JNIEnv * env , jclass, jobject outBuffer , jobject inBuffer ){
- JNI_DEBUG( "native call called!" );
-
- Message out( env->GetDirectBufferAddress( outBuffer ) , false );
- Message in;
+JNIEXPORT jint JNICALL java_native_call(JNIEnv * env , jclass, jobject outBuffer , jobject inBuffer ) {
+ JNI_DEBUG( "native call called!" );
- jniCallback( out , in );
+ Message out( env->GetDirectBufferAddress( outBuffer ) , false );
+ Message in;
- JNI_DEBUG( "in.data : " << in.data );
- if ( in.data && in.data->len > 0 ){
- JNI_DEBUG( "copying data of len :" << in.data->len );
- memcpy( env->GetDirectBufferAddress( inBuffer ) , in.data , in.data->len );
- return in.data->len;
- }
+ jniCallback( out , in );
- return 0;
+ JNI_DEBUG( "in.data : " << in.data );
+ if ( in.data && in.data->len > 0 ) {
+ JNI_DEBUG( "copying data of len :" << in.data->len );
+ memcpy( env->GetDirectBufferAddress( inBuffer ) , in.data , in.data->len );
+ return in.data->len;
+ }
+
+ return 0;
}
// ----
int javajstest() {
-
- const int debug = 0;
-
- JavaJSImpl& JavaJS = *::JavaJS;
-
- if ( debug ) log() << "about to create scope" << endl;
- jlong scope = JavaJS.scopeCreate();
- jassert( scope );
- if ( debug ) cout << "got scope" << endl;
-
-
- jlong func1 = JavaJS.functionCreate( "foo = 5.6; bar = \"eliot\"; abc = { foo : 517 }; " );
- jassert( ! JavaJS.invoke( scope , func1 ) );
-
- jassert( 5.6 == JavaJS.scopeGetNumber( scope , "foo" ) );
- jassert( ((string)"eliot") == JavaJS.scopeGetString( scope , "bar" ) );
-
- if ( debug ) cout << "func2 start" << endl;
- jassert( JavaJS.scopeSetNumber( scope , "a" , 5.17 ) );
- jassert( JavaJS.scopeSetString( scope , "b" , "eliot" ) );
- jlong func2 = JavaJS.functionCreate( "assert( 5.17 == a ); assert( \"eliot\" == b );" );
- jassert( ! JavaJS.invoke( scope , func2 ) );
- if ( debug ) cout << "func2 end" << endl;
-
- if ( debug ) cout << "func3 start" << endl;
- jlong func3 = JavaJS.functionCreate( "function(){ z = true; } " );
- jassert( func3 );
- jassert( ! JavaJS.invoke( scope , func3 ) );
- jassert( JavaJS.scopeGetBoolean( scope , "z" ) );
- if ( debug ) cout << "func3 done" << endl;
-
-#ifdef J_USE_OBJ
-
- if ( debug ) cout << "going to get object" << endl;
- BSONObj obj = JavaJS.scopeGetObject( scope , "abc" );
- if ( debug ) cout << "done getting object" << endl;
-
- if ( debug ){
- cout << "obj : " << obj.toString() << endl;
- }
-
- {
- time_t start = time(0);
- for ( int i=0; i<5000; i++ ){
- JavaJS.scopeSetObject( scope , "obj" , &obj );
+
+ const int debug = 0;
+
+ JavaJSImpl& JavaJS = *::JavaJS;
+
+ if ( debug ) log() << "about to create scope" << endl;
+ jlong scope = JavaJS.scopeCreate();
+ jassert( scope );
+ if ( debug ) cout << "got scope" << endl;
+
+
+ jlong func1 = JavaJS.functionCreate( "foo = 5.6; bar = \"eliot\"; abc = { foo : 517 }; " );
+ jassert( ! JavaJS.invoke( scope , func1 ) );
+
+ jassert( 5.6 == JavaJS.scopeGetNumber( scope , "foo" ) );
+ jassert( ((string)"eliot") == JavaJS.scopeGetString( scope , "bar" ) );
+
+ if ( debug ) cout << "func2 start" << endl;
+ jassert( JavaJS.scopeSetNumber( scope , "a" , 5.17 ) );
+ jassert( JavaJS.scopeSetString( scope , "b" , "eliot" ) );
+ jlong func2 = JavaJS.functionCreate( "assert( 5.17 == a ); assert( \"eliot\" == b );" );
+ jassert( ! JavaJS.invoke( scope , func2 ) );
+ if ( debug ) cout << "func2 end" << endl;
+
+ if ( debug ) cout << "func3 start" << endl;
+ jlong func3 = JavaJS.functionCreate( "function(){ z = true; } " );
+ jassert( func3 );
+ jassert( ! JavaJS.invoke( scope , func3 ) );
+ jassert( JavaJS.scopeGetBoolean( scope , "z" ) );
+ if ( debug ) cout << "func3 done" << endl;
+
+#ifdef J_USE_OBJ
+
+ if ( debug ) cout << "going to get object" << endl;
+ BSONObj obj = JavaJS.scopeGetObject( scope , "abc" );
+ if ( debug ) cout << "done getting object" << endl;
+
+ if ( debug ) {
+ cout << "obj : " << obj.toString() << endl;
}
- time_t end = time(0);
-
- if( debug )
- cout << "time : " << (unsigned) ( end - start ) << endl;
- }
-
- if ( debug ) cout << "func4 start" << endl;
- JavaJS.scopeSetObject( scope , "obj" , &obj );
- if ( debug ) cout << "\t here 1" << endl;
- jlong func4 = JavaJS.functionCreate( "tojson( obj );" );
- if ( debug ) cout << "\t here 2" << endl;
- jassert( ! JavaJS.invoke( scope , func4 ) );
- if ( debug ) cout << "func4 end" << endl;
-
- if ( debug ) cout << "func5 start" << endl;
- jassert( JavaJS.scopeSetObject( scope , "c" , &obj ) );
- jlong func5 = JavaJS.functionCreate( "assert.eq( 517 , c.foo );" );
- jassert( func5 );
- jassert( ! JavaJS.invoke( scope , func5 ) );
- if ( debug ) cout << "func5 done" << endl;
+
+ {
+ time_t start = time(0);
+ for ( int i=0; i<5000; i++ ) {
+ JavaJS.scopeSetObject( scope , "obj" , &obj );
+ }
+ time_t end = time(0);
+
+ if ( debug )
+ cout << "time : " << (unsigned) ( end - start ) << endl;
+ }
+
+ if ( debug ) cout << "func4 start" << endl;
+ JavaJS.scopeSetObject( scope , "obj" , &obj );
+ if ( debug ) cout << "\t here 1" << endl;
+ jlong func4 = JavaJS.functionCreate( "tojson( obj );" );
+ if ( debug ) cout << "\t here 2" << endl;
+ jassert( ! JavaJS.invoke( scope , func4 ) );
+ if ( debug ) cout << "func4 end" << endl;
+
+ if ( debug ) cout << "func5 start" << endl;
+ jassert( JavaJS.scopeSetObject( scope , "c" , &obj ) );
+ jlong func5 = JavaJS.functionCreate( "assert.eq( 517 , c.foo );" );
+ jassert( func5 );
+ jassert( ! JavaJS.invoke( scope , func5 ) );
+ if ( debug ) cout << "func5 done" << endl;
#endif
- if ( debug ) cout << "func6 start" << endl;
- for ( int i=0; i<100; i++ ){
- double val = i + 5;
- JavaJS.scopeSetNumber( scope , "zzz" , val );
- jlong func6 = JavaJS.functionCreate( " xxx = zzz; " );
- jassert( ! JavaJS.invoke( scope , func6 ) );
- double n = JavaJS.scopeGetNumber( scope , "xxx" );
- jassert( val == n );
- }
- if ( debug ) cout << "func6 done" << endl;
-
- jlong func7 = JavaJS.functionCreate( "return 11;" );
- jassert( ! JavaJS.invoke( scope , func7 ) );
- assert( 11 == JavaJS.scopeGetNumber( scope , "return" ) );
-
- scope = JavaJS.scopeCreate();
- jlong func8 = JavaJS.functionCreate( "function(){ return 12; }" );
- jassert( ! JavaJS.invoke( scope , func8 ) );
- assert( 12 == JavaJS.scopeGetNumber( scope , "return" ) );
-
-
- return 0;
+ if ( debug ) cout << "func6 start" << endl;
+ for ( int i=0; i<100; i++ ) {
+ double val = i + 5;
+ JavaJS.scopeSetNumber( scope , "zzz" , val );
+ jlong func6 = JavaJS.functionCreate( " xxx = zzz; " );
+ jassert( ! JavaJS.invoke( scope , func6 ) );
+ double n = JavaJS.scopeGetNumber( scope , "xxx" );
+ jassert( val == n );
+ }
+ if ( debug ) cout << "func6 done" << endl;
+
+ jlong func7 = JavaJS.functionCreate( "return 11;" );
+ jassert( ! JavaJS.invoke( scope , func7 ) );
+ assert( 11 == JavaJS.scopeGetNumber( scope , "return" ) );
+
+ scope = JavaJS.scopeCreate();
+ jlong func8 = JavaJS.functionCreate( "function(){ return 12; }" );
+ jassert( ! JavaJS.invoke( scope , func8 ) );
+ assert( 12 == JavaJS.scopeGetNumber( scope , "return" ) );
+
+
+ return 0;
}
#if defined(_MAIN)
-int main() { return javajstest(); }
+int main() {
+ return javajstest();
+}
#endif
#endif
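
For reference, javajstest above exercises the full embedding flow; a condensed sketch of that flow, using only calls shown in this file, looks like this (the JavaScript snippet is illustrative):

    JavaJS = new JavaJSImpl();                          // global hook, as in javatest.cpp
    jlong scope = JavaJS->scopeCreate();
    JavaJS->scopeSetNumber( scope , "a" , 5.17 );
    jlong f = JavaJS->functionCreate( "b = a * 2;" );
    jassert( ! JavaJS->invoke( scope , f ) );           // 0 == INVOKE_SUCCESS
    double b = JavaJS->scopeGetNumber( scope , "b" );   // 10.34
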
diff --git a/db/javajs.h b/db/javajs.h
index b70b5d357a1..82a36ef2561 100644
--- a/db/javajs.h
+++ b/db/javajs.h
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -46,130 +46,157 @@ const char * findEd(const char *);
class BSONObj;
class JavaJSImpl {
- public:
- JavaJSImpl();
- JavaJSImpl(const char *);
- ~JavaJSImpl();
-
- jlong scopeCreate();
- int scopeInit( jlong id , BSONObj * obj );
- int scopeSetThis( jlong id , BSONObj * obj );
- jboolean scopeReset( jlong id );
- void scopeFree( jlong id );
-
- double scopeGetNumber( jlong id , const char * field );
- string scopeGetString( jlong id , const char * field );
- jboolean scopeGetBoolean( jlong id , const char * field );
- BSONObj scopeGetObject( jlong id , const char * field );
- char scopeGetType( jlong id , const char * field );
-
- int scopeSetNumber( jlong id , const char * field , double val );
- int scopeSetString( jlong id , const char * field , const char * val );
- int scopeSetObject( jlong id , const char * field , BSONObj * obj );
- int scopeSetBoolean( jlong id , const char * field , jboolean val );
-
- jlong functionCreate( const char * code );
-
- /* return values:
- public static final int NO_SCOPE = -1;
- public static final int NO_FUNCTION = -2;
- public static final int INVOKE_ERROR = -3;
- public static final int INVOKE_SUCCESS = 0;
- */
- int invoke( jlong scope , jlong function );
-
- void printException();
-
- void run( const char * js );
-
- void detach( JNIEnv * env ){
- _jvm->DetachCurrentThread();
- }
-
- private:
-
- jobject create( const char * name ){
- jclass c = findClass( name );
- if ( ! c )
- return 0;
-
- jmethodID cons = _getEnv()->GetMethodID( c , "<init>" , "()V" );
- if ( ! cons )
- return 0;
-
- return _getEnv()->NewObject( c , cons );
- }
-
- jclass findClass( const char * name ){
- return _getEnv()->FindClass( name );
- }
-
-
- private:
-
- JNIEnv * _getEnv();
-
- JavaVM * _jvm;
- JNIEnv * _mainEnv;
- JavaVMInitArgs * _vmArgs;
-
- boost::thread_specific_ptr<JNIEnv> * _envs;
-
- jclass _dbhook;
- jclass _dbjni;
-
- jmethodID _scopeCreate;
- jmethodID _scopeInit;
- jmethodID _scopeSetThis;
- jmethodID _scopeReset;
- jmethodID _scopeFree;
-
- jmethodID _scopeGetNumber;
- jmethodID _scopeGetString;
- jmethodID _scopeGetObject;
- jmethodID _scopeGetBoolean;
- jmethodID _scopeGuessObjectSize;
- jmethodID _scopeGetType;
-
- jmethodID _scopeSetNumber;
- jmethodID _scopeSetString;
- jmethodID _scopeSetObject;
- jmethodID _scopeSetBoolean;
-
- jmethodID _functionCreate;
-
- jmethodID _invoke;
+public:
+ JavaJSImpl();
+ JavaJSImpl(const char *);
+ ~JavaJSImpl();
+
+ jlong scopeCreate();
+ int scopeInit( jlong id , BSONObj * obj );
+ int scopeSetThis( jlong id , BSONObj * obj );
+ jboolean scopeReset( jlong id );
+ void scopeFree( jlong id );
+
+ double scopeGetNumber( jlong id , const char * field );
+ string scopeGetString( jlong id , const char * field );
+ jboolean scopeGetBoolean( jlong id , const char * field );
+ BSONObj scopeGetObject( jlong id , const char * field );
+ char scopeGetType( jlong id , const char * field );
+
+ int scopeSetNumber( jlong id , const char * field , double val );
+ int scopeSetString( jlong id , const char * field , const char * val );
+ int scopeSetObject( jlong id , const char * field , BSONObj * obj );
+ int scopeSetBoolean( jlong id , const char * field , jboolean val );
+
+ jlong functionCreate( const char * code );
+
+ /* return values:
+ public static final int NO_SCOPE = -1;
+ public static final int NO_FUNCTION = -2;
+ public static final int INVOKE_ERROR = -3;
+ public static final int INVOKE_SUCCESS = 0;
+ */
+ int invoke( jlong scope , jlong function );
+
+ void printException();
+
+ void run( const char * js );
+
+ void detach( JNIEnv * env ) {
+ _jvm->DetachCurrentThread();
+ }
+
+private:
+
+ jobject create( const char * name ) {
+ jclass c = findClass( name );
+ if ( ! c )
+ return 0;
+
+ jmethodID cons = _getEnv()->GetMethodID( c , "<init>" , "()V" );
+ if ( ! cons )
+ return 0;
+
+ return _getEnv()->NewObject( c , cons );
+ }
+
+ jclass findClass( const char * name ) {
+ return _getEnv()->FindClass( name );
+ }
+
+
+private:
+
+ JNIEnv * _getEnv();
+
+ JavaVM * _jvm;
+ JNIEnv * _mainEnv;
+ JavaVMInitArgs * _vmArgs;
+
+ boost::thread_specific_ptr<JNIEnv> * _envs;
+
+ jclass _dbhook;
+ jclass _dbjni;
+
+ jmethodID _scopeCreate;
+ jmethodID _scopeInit;
+ jmethodID _scopeSetThis;
+ jmethodID _scopeReset;
+ jmethodID _scopeFree;
+
+ jmethodID _scopeGetNumber;
+ jmethodID _scopeGetString;
+ jmethodID _scopeGetObject;
+ jmethodID _scopeGetBoolean;
+ jmethodID _scopeGuessObjectSize;
+ jmethodID _scopeGetType;
+
+ jmethodID _scopeSetNumber;
+ jmethodID _scopeSetString;
+ jmethodID _scopeSetObject;
+ jmethodID _scopeSetBoolean;
+
+ jmethodID _functionCreate;
+
+ jmethodID _invoke;
};
extern JavaJSImpl *JavaJS;
// a javascript "scope"
-class Scope {
- public:
- Scope() { s = JavaJS->scopeCreate(); }
- ~Scope() { JavaJS->scopeFree(s); s = 0; }
- void reset() { JavaJS->scopeReset(s); }
-
- void init( const char * data ){
- BSONObj o( data , 0 );
- JavaJS->scopeInit( s , & o );
- }
-
- double getNumber(const char *field) { return JavaJS->scopeGetNumber(s,field); }
- string getString(const char *field) { return JavaJS->scopeGetString(s,field); }
- jboolean getBoolean(const char *field) { return JavaJS->scopeGetBoolean(s,field); }
- BSONObj getObject(const char *field ) { return JavaJS->scopeGetObject(s,field); }
- int type(const char *field ) { return JavaJS->scopeGetType(s,field); }
-
- void setNumber(const char *field, double val ) { JavaJS->scopeSetNumber(s,field,val); }
- void setString(const char *field, const char * val ) { JavaJS->scopeSetString(s,field,val); }
- void setObject(const char *field, BSONObj& obj ) { JavaJS->scopeSetObject(s,field,&obj); }
- void setBoolean(const char *field, jboolean val ) { JavaJS->scopeSetBoolean(s,field,val); }
-
- int invoke(jlong function) { return JavaJS->invoke(s,function); }
-
- jlong s;
+class Scope {
+public:
+ Scope() {
+ s = JavaJS->scopeCreate();
+ }
+ ~Scope() {
+ JavaJS->scopeFree(s);
+ s = 0;
+ }
+ void reset() {
+ JavaJS->scopeReset(s);
+ }
+
+ void init( const char * data ) {
+ BSONObj o( data , 0 );
+ JavaJS->scopeInit( s , & o );
+ }
+
+ double getNumber(const char *field) {
+ return JavaJS->scopeGetNumber(s,field);
+ }
+ string getString(const char *field) {
+ return JavaJS->scopeGetString(s,field);
+ }
+ jboolean getBoolean(const char *field) {
+ return JavaJS->scopeGetBoolean(s,field);
+ }
+ BSONObj getObject(const char *field ) {
+ return JavaJS->scopeGetObject(s,field);
+ }
+ int type(const char *field ) {
+ return JavaJS->scopeGetType(s,field);
+ }
+
+ void setNumber(const char *field, double val ) {
+ JavaJS->scopeSetNumber(s,field,val);
+ }
+ void setString(const char *field, const char * val ) {
+ JavaJS->scopeSetString(s,field,val);
+ }
+ void setObject(const char *field, BSONObj& obj ) {
+ JavaJS->scopeSetObject(s,field,&obj);
+ }
+ void setBoolean(const char *field, jboolean val ) {
+ JavaJS->scopeSetBoolean(s,field,val);
+ }
+
+ int invoke(jlong function) {
+ return JavaJS->invoke(s,function);
+ }
+
+ jlong s;
};
JNIEXPORT void JNICALL java_native_say(JNIEnv *, jclass, jobject outBuffer );
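
The Scope class above is the RAII convenience wrapper over the raw jlong handles; a minimal usage sketch (the JavaScript snippet is illustrative):

    Scope s;                                   // scopeCreate() in the ctor, scopeFree() in the dtor
    s.setNumber( "x" , 3.0 );
    jlong f = JavaJS->functionCreate( "y = x + 1;" );
    if ( s.invoke( f ) == 0 ) {                // 0 == INVOKE_SUCCESS
        double y = s.getNumber( "y" );         // 4.0
        cout << "y: " << y << endl;
    }
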
diff --git a/db/javatest.cpp b/db/javatest.cpp
index b4af248f7a9..22f2bdf8d3c 100644
--- a/db/javatest.cpp
+++ b/db/javatest.cpp
@@ -2,23 +2,23 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "javajs.h"
-int main(){
- JavaJS = new JavaJSImpl();
- javajstest();
+int main() {
+ JavaJS = new JavaJSImpl();
+ javajstest();
}
diff --git a/db/jsobj.cpp b/db/jsobj.cpp
index 4e53cc77c00..1980787fd35 100644
--- a/db/jsobj.cpp
+++ b/db/jsobj.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -24,249 +24,258 @@
BSONElement nullElement;
-string BSONElement::toString() const {
- stringstream s;
- switch( type() ) {
+string BSONElement::toString() const {
+ stringstream s;
+ switch ( type() ) {
case EOO:
- return "EOO";
+ return "EOO";
case Date:
- s << fieldName() << ": Date(" << hex << date() << ')'; break;
+ s << fieldName() << ": Date(" << hex << date() << ')';
+ break;
case RegEx:
- {
- s << fieldName() << ": /" << regex() << '/';
- const char *p = regexFlags();
- if( p ) s << p;
- }
+ {
+ s << fieldName() << ": /" << regex() << '/';
+ const char *p = regexFlags();
+ if ( p ) s << p;
+ }
+ break;
+ case NumberDouble:
+ case NumberInt:
+ s << fieldName() << ": " << number();
+ break;
+ case Bool:
+ s << fieldName() << ": " << ( boolean() ? "true" : "false" );
+ break;
+ case Object:
+ case Array:
+ s << fieldName() << ": " << embeddedObject().toString();
+ break;
+ case Undefined:
+ s << fieldName() << ": undefined";
+ break;
+ case jstNULL:
+ s << fieldName() << ": null";
+ break;
+ case MaxKey:
+ s << fieldName() << ": MaxKey";
+ break;
+ case MinKey:
+ s << fieldName() << ": MinKey";
break;
- case NumberDouble:
- case NumberInt:
- s << fieldName() << ": " << number(); break;
- case Bool:
- s << fieldName() << ": " << ( boolean() ? "true" : "false" ); break;
- case Object:
- case Array:
- s << fieldName() << ": " << embeddedObject().toString(); break;
- case Undefined:
- s << fieldName() << ": undefined"; break;
- case jstNULL:
- s << fieldName() << ": null"; break;
- case MaxKey:
- s << fieldName() << ": MaxKey"; break;
- case MinKey:
- s << fieldName() << ": MinKey"; break;
case CodeWScope:
- s << fieldName() << ": codewscope"; break;
- case Code:
- s << fieldName() << ": ";
- if( valuestrsize() > 80 )
- s << string(valuestr()).substr(0, 70) << "...";
- else {
- s << valuestr();
- }
- break;
+ s << fieldName() << ": codewscope";
+ break;
+ case Code:
+ s << fieldName() << ": ";
+ if ( valuestrsize() > 80 )
+ s << string(valuestr()).substr(0, 70) << "...";
+ else {
+ s << valuestr();
+ }
+ break;
case Symbol:
- case String:
- s << fieldName() << ": ";
- if( valuestrsize() > 80 )
- s << '"' << string(valuestr()).substr(0, 70) << "...\"";
- else {
- s << '"' << valuestr() << '"';
- }
- break;
- case DBRef:
- s << fieldName();
- s << " : DBRef('" << valuestr() << "',";
- {
- OID *x = (OID *) (valuestr() + valuestrsize());
- s << hex << x->a << x->b << dec << ')';
- }
- break;
- case jstOID:
- s << fieldName() << " : ObjId(";
- s << hex << oid().a << oid().b << dec << ')';
- break;
+ case String:
+ s << fieldName() << ": ";
+ if ( valuestrsize() > 80 )
+ s << '"' << string(valuestr()).substr(0, 70) << "...\"";
+ else {
+ s << '"' << valuestr() << '"';
+ }
+ break;
+ case DBRef:
+ s << fieldName();
+ s << " : DBRef('" << valuestr() << "',";
+ {
+ OID *x = (OID *) (valuestr() + valuestrsize());
+ s << hex << x->a << x->b << dec << ')';
+ }
+ break;
+ case jstOID:
+ s << fieldName() << " : ObjId(";
+ s << hex << oid().a << oid().b << dec << ')';
+ break;
default:
- s << fieldName() << ": ?type=" << type();
- break;
- }
- return s.str();
+ s << fieldName() << ": ?type=" << type();
+ break;
+ }
+ return s.str();
}
int BSONElement::size() const {
- if( totalSize >= 0 )
- return totalSize;
-
- int x = 1;
- switch( type() ) {
- case EOO:
- case Undefined:
- case jstNULL:
- case MaxKey:
- case MinKey:
- break;
- case Bool:
- x = 2;
- break;
- case NumberInt:
- x = 5;
- break;
- case Date:
- case NumberDouble:
- x = 9;
- break;
- case jstOID:
- x = 13;
- break;
- case Symbol:
- case Code:
- case String:
- x = valuestrsize() + 4 + 1;
- break;
- case CodeWScope:
- x = objsize() + 1;
- break;
-
- case DBRef:
- x = valuestrsize() + 4 + 12 + 1;
- break;
- case Object:
- case Array:
- x = objsize() + 1;
- break;
- case BinData:
- x = valuestrsize() + 4 + 1 + 1/*subtype*/;
- break;
- case RegEx:
- {
- const char *p = value();
- int len1 = strlen(p);
- p = p + len1 + 1;
- x = 1 + len1 + strlen(p) + 2;
- }
- break;
- default:
- cout << "BSONElement: bad type " << (int) type() << endl;
- assert(false);
- }
- ((BSONElement *) this)->totalSize = x + fieldNameSize;
-
- if( !eoo() ) {
- const char *next = data + totalSize;
- if( *next < MinKey || ( *next > JSTypeMax && *next != MaxKey ) ) {
- // bad type.
- cout << "***\n";
- cout << "Bad data or size in BSONElement::size()\n";
- cout << "bad type:" << (int) *next << '\n';
- cout << "totalsize:" << totalSize << " fieldnamesize:" << fieldNameSize << '\n';
- cout << "lastrec:" << endl;
- //dumpmemory(data, totalSize + 15);
- assert(false);
- }
- }
-
- return totalSize;
+ if ( totalSize >= 0 )
+ return totalSize;
+
+ int x = 1;
+ switch ( type() ) {
+ case EOO:
+ case Undefined:
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ break;
+ case Bool:
+ x = 2;
+ break;
+ case NumberInt:
+ x = 5;
+ break;
+ case Date:
+ case NumberDouble:
+ x = 9;
+ break;
+ case jstOID:
+ x = 13;
+ break;
+ case Symbol:
+ case Code:
+ case String:
+ x = valuestrsize() + 4 + 1;
+ break;
+ case CodeWScope:
+ x = objsize() + 1;
+ break;
+
+ case DBRef:
+ x = valuestrsize() + 4 + 12 + 1;
+ break;
+ case Object:
+ case Array:
+ x = objsize() + 1;
+ break;
+ case BinData:
+ x = valuestrsize() + 4 + 1 + 1/*subtype*/;
+ break;
+ case RegEx:
+ {
+ const char *p = value();
+ int len1 = strlen(p);
+ p = p + len1 + 1;
+ x = 1 + len1 + strlen(p) + 2;
+ }
+ break;
+ default:
+ cout << "BSONElement: bad type " << (int) type() << endl;
+ assert(false);
+ }
+ ((BSONElement *) this)->totalSize = x + fieldNameSize;
+
+ if ( !eoo() ) {
+ const char *next = data + totalSize;
+ if ( *next < MinKey || ( *next > JSTypeMax && *next != MaxKey ) ) {
+ // bad type.
+ cout << "***\n";
+ cout << "Bad data or size in BSONElement::size()\n";
+ cout << "bad type:" << (int) *next << '\n';
+ cout << "totalsize:" << totalSize << " fieldnamesize:" << fieldNameSize << '\n';
+ cout << "lastrec:" << endl;
+ //dumpmemory(data, totalSize + 15);
+ assert(false);
+ }
+ }
+
+ return totalSize;
}
int BSONElement::getGtLtOp() const {
- const char *fn = fieldName();
- if( fn[0] == '$' && fn[1] ) {
- if( fn[2] == 't' ) {
- if( fn[1] == 'g' ) {
- if( fn[3] == 0 ) return JSMatcher::GT;
- else if( fn[3] == 'e' && fn[4] == 0 ) return JSMatcher::GTE;
- }
- else if( fn[1] == 'l' ) {
- if( fn[3] == 0 ) return JSMatcher::LT;
- else if( fn[3] == 'e' && fn[4] == 0 ) return JSMatcher::LTE;
- }
- }
- else if( fn[2] == 'e' ) {
- if( fn[1] == 'n' && fn[3] == 0 )
+ const char *fn = fieldName();
+ if ( fn[0] == '$' && fn[1] ) {
+ if ( fn[2] == 't' ) {
+ if ( fn[1] == 'g' ) {
+ if ( fn[3] == 0 ) return JSMatcher::GT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) return JSMatcher::GTE;
+ }
+ else if ( fn[1] == 'l' ) {
+ if ( fn[3] == 0 ) return JSMatcher::LT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) return JSMatcher::LTE;
+ }
+ }
+ else if ( fn[2] == 'e' ) {
+ if ( fn[1] == 'n' && fn[3] == 0 )
return JSMatcher::NE;
}
- else if( fn[1] == 'i' && fn[2] == 'n' && fn[3] == 0 )
- return JSMatcher::opIN;
- }
- return JSMatcher::Equality;
+ else if ( fn[1] == 'i' && fn[2] == 'n' && fn[3] == 0 )
+ return JSMatcher::opIN;
+ }
+ return JSMatcher::Equality;
}
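
In short, getGtLtOp decodes a query element's field name into a JSMatcher operator; the character tests above amount to this mapping:

    // "$gt"  -> JSMatcher::GT      "$gte" -> JSMatcher::GTE
    // "$lt"  -> JSMatcher::LT      "$lte" -> JSMatcher::LTE
    // "$ne"  -> JSMatcher::NE      "$in"  -> JSMatcher::opIN
    // any other field name         -> JSMatcher::Equality
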
int BSONElement::woCompare( const BSONElement &e,
- bool considerFieldName ) const {
- int lt = (int) type();
- if( lt == NumberInt ) lt = NumberDouble;
- int rt = (int) e.type();
- if( rt == NumberInt ) rt = NumberDouble;
-
- int x = lt - rt;
- if( x != 0 )
- return x;
- if( considerFieldName ) {
- x = strcmp(fieldName(), e.fieldName());
- if( x != 0 )
- return x;
- }
- x = compareElementValues(*this, e);
- return x;
+ bool considerFieldName ) const {
+ int lt = (int) type();
+ if ( lt == NumberInt ) lt = NumberDouble;
+ int rt = (int) e.type();
+ if ( rt == NumberInt ) rt = NumberDouble;
+
+ int x = lt - rt;
+ if ( x != 0 )
+ return x;
+ if ( considerFieldName ) {
+ x = strcmp(fieldName(), e.fieldName());
+ if ( x != 0 )
+ return x;
+ }
+ x = compareElementValues(*this, e);
+ return x;
}
/* must be same type! */
int compareElementValues(const BSONElement& l, const BSONElement& r) {
- int f;
- double x;
- switch( l.type() ) {
- case EOO:
- case Undefined:
- case jstNULL:
- case MaxKey:
- case MinKey:
- f = l.type() - r.type();
- if( f<0 ) return -1;
- return f==0 ? 0 : 1;
- case Bool:
- return *l.value() - *r.value();
- case Date:
- if( l.date() < r.date() )
- return -1;
- return l.date() == r.date() ? 0 : 1;
- case NumberInt:
- case NumberDouble:
- x = l.number() - r.number();
- if( x < 0 ) return -1;
- return x == 0 ? 0 : 1;
- case jstOID:
- return memcmp(l.value(), r.value(), 12);
- case Code:
- case Symbol:
- case String:
- /* todo: utf version */
- return strcmp(l.valuestr(), r.valuestr());
- case Object:
- case Array:
- return l.embeddedObject().woCompare( r.embeddedObject() );
- case DBRef:
- {
- int lsz = l.valuesize();
- int rsz = r.valuesize();
- if( lsz - rsz != 0 ) return lsz - rsz;
- return memcmp(l.value(), r.value(), lsz);
- }
- case RegEx:
- {
- int c = strcmp(l.regex(), r.regex());
- if( c )
- return c;
- return strcmp(l.regexFlags(), r.regexFlags());
- }
- case BinData:
- // todo: just memcmp these.
- cout << "compareElementValues: can't compare this type:" << (int) l.type() << endl;
- assert(false);
- break;
- default:
- cout << "compareElementValues: bad type " << (int) l.type() << endl;
- assert(false);
- }
- return -1;
+ int f;
+ double x;
+ switch ( l.type() ) {
+ case EOO:
+ case Undefined:
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ f = l.type() - r.type();
+ if ( f<0 ) return -1;
+ return f==0 ? 0 : 1;
+ case Bool:
+ return *l.value() - *r.value();
+ case Date:
+ if ( l.date() < r.date() )
+ return -1;
+ return l.date() == r.date() ? 0 : 1;
+ case NumberInt:
+ case NumberDouble:
+ x = l.number() - r.number();
+ if ( x < 0 ) return -1;
+ return x == 0 ? 0 : 1;
+ case jstOID:
+ return memcmp(l.value(), r.value(), 12);
+ case Code:
+ case Symbol:
+ case String:
+ /* todo: utf version */
+ return strcmp(l.valuestr(), r.valuestr());
+ case Object:
+ case Array:
+ return l.embeddedObject().woCompare( r.embeddedObject() );
+ case DBRef:
+ {
+ int lsz = l.valuesize();
+ int rsz = r.valuesize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value(), r.value(), lsz);
+ }
+ case RegEx:
+ {
+ int c = strcmp(l.regex(), r.regex());
+ if ( c )
+ return c;
+ return strcmp(l.regexFlags(), r.regexFlags());
+ }
+ case BinData:
+ // todo: just memcmp these.
+ cout << "compareElementValues: can't compare this type:" << (int) l.type() << endl;
+ assert(false);
+ break;
+ default:
+ cout << "compareElementValues: bad type " << (int) l.type() << endl;
+ assert(false);
+ }
+ return -1;
}
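
A worked example of the ordering woCompare and compareElementValues implement, using the same builder calls as the unit test further down (NumberInt elements compare by numeric value):

    BSONObjBuilder a, b;
    a.appendInt( "x" , 2 );
    b.appendInt( "x" , 3 );
    BSONObj l = a.done();
    BSONObj r = b.done();
    assert( l.woCompare( r ) < 0 );    // same type, same field name, 2 < 3
    assert( r.woCompare( l ) > 0 );
    assert( l.woCompare( l ) == 0 );
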
/* JSMatcher --------------------------------------*/
@@ -277,335 +286,351 @@ int compareElementValues(const BSONElement& l, const BSONElement& r) {
// a : 3
// else we just append the element.
//
-void appendElementHandlingGtLt(BSONObjBuilder& b, BSONElement& e) {
- if( e.type() == Object ) {
- BSONElement fe = e.embeddedObject().firstElement();
- const char *fn = fe.fieldName();
- if( fn[0] == '$' && fn[1] && fn[2] == 't' ) {
- b.appendAs(fe, e.fieldName());
- return;
- }
- }
- b.append(e);
+void appendElementHandlingGtLt(BSONObjBuilder& b, BSONElement& e) {
+ if ( e.type() == Object ) {
+ BSONElement fe = e.embeddedObject().firstElement();
+ const char *fn = fe.fieldName();
+ if ( fn[0] == '$' && fn[1] && fn[2] == 't' ) {
+ b.appendAs(fe, e.fieldName());
+ return;
+ }
+ }
+ b.append(e);
}
-int getGtLtOp(BSONElement& e) {
- if( e.type() != Object )
- return JSMatcher::Equality;
+int getGtLtOp(BSONElement& e) {
+ if ( e.type() != Object )
+ return JSMatcher::Equality;
- BSONElement fe = e.embeddedObject().firstElement();
- return fe.getGtLtOp();
+ BSONElement fe = e.embeddedObject().firstElement();
+ return fe.getGtLtOp();
}
/* BSONObj ------------------------------------------------------------*/
string BSONObj::toString() const {
- if( isEmpty() ) return "{}";
-
- stringstream s;
- s << "{ ";
- BSONObjIterator i(*this);
- BSONElement e = i.next();
- if( !e.eoo() )
- while( 1 ) {
- s << e.toString();
- e = i.next();
- if( e.eoo() )
- break;
- s << ", ";
- }
- s << " }";
- return s.str();
+ if ( isEmpty() ) return "{}";
+
+ stringstream s;
+ s << "{ ";
+ BSONObjIterator i(*this);
+ BSONElement e = i.next();
+ if ( !e.eoo() )
+ while ( 1 ) {
+ s << e.toString();
+ e = i.next();
+ if ( e.eoo() )
+ break;
+ s << ", ";
+ }
+ s << " }";
+ return s.str();
}
// todo: can be a little faster if we don't use toString() here.
-bool BSONObj::valid() const {
- try {
+bool BSONObj::valid() const {
+ try {
toString();
}
- catch(...) {
+ catch (...) {
return false;
}
return true;
}
/* well ordered compare */
-int BSONObj::woCompare(const BSONObj& r, bool considerFieldName) const {
- if( isEmpty() )
- return r.isEmpty() ? 0 : -1;
- if( r.isEmpty() )
- return 1;
-
- BSONObjIterator i(*this);
- BSONObjIterator j(r);
- while( 1 ) {
- // so far, equal...
-
- BSONElement l = i.next();
- BSONElement r = j.next();
- if ( l.eoo() )
- return 0;
-
- int x = l.woCompare( r, considerFieldName );
- if ( x != 0 )
- return x;
- }
- return -1;
-}
+int BSONObj::woCompare(const BSONObj& r, bool considerFieldName) const {
+ if ( isEmpty() )
+ return r.isEmpty() ? 0 : -1;
+ if ( r.isEmpty() )
+ return 1;
+
+ BSONObjIterator i(*this);
+ BSONObjIterator j(r);
+ while ( 1 ) {
+ // so far, equal...
+
+ BSONElement l = i.next();
+ BSONElement r = j.next();
+ if ( l.eoo() )
+ return 0;
+
+ int x = l.woCompare( r, considerFieldName );
+ if ( x != 0 )
+ return x;
+ }
+ return -1;
+}
BSONElement BSONObj::getField(const char *name) const {
- if( details ) {
+ if ( details ) {
BSONObjIterator i(*this);
- while( i.more() ) {
+ while ( i.more() ) {
BSONElement e = i.next();
- if( e.eoo() )
+ if ( e.eoo() )
break;
- if( strcmp(e.fieldName(), name) == 0 )
+ if ( strcmp(e.fieldName(), name) == 0 )
return e;
}
}
- return nullElement;
+ return nullElement;
}
-/* return has eoo() true if no match
+/* return has eoo() true if no match
supports "." notation to reach into embedded objects
*/
BSONElement BSONObj::getFieldDotted(const char *name) const {
- BSONElement e = getField( name );
- if( e.eoo() ) {
- const char *p = strchr(name, '.');
- if( p ) {
- string left(name, p-name);
- BSONObj sub = getObjectField(left.c_str());
- return sub.isEmpty() ? nullElement : sub.getFieldDotted(p+1);
- }
- }
+ BSONElement e = getField( name );
+ if ( e.eoo() ) {
+ const char *p = strchr(name, '.');
+ if ( p ) {
+ string left(name, p-name);
+ BSONObj sub = getObjectField(left.c_str());
+ return sub.isEmpty() ? nullElement : sub.getFieldDotted(p+1);
+ }
+ }
return e;
-/*
- BSONObjIterator i(*this);
- while( i.more() ) {
- BSONElement e = i.next();
- if( e.eoo() )
- break;
- if( strcmp(e.fieldName(), name) == 0 )
- return e;
- }
- return nullElement;
-*/
+ /*
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if( e.eoo() )
+ break;
+ if( strcmp(e.fieldName(), name) == 0 )
+ return e;
+ }
+ return nullElement;
+ */
}
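
A short sketch of the dotted lookup, assuming o is a BSONObj that holds { user : { name : "sara" } } (how o was built is not shown here):

    BSONElement e = o.getFieldDotted( "user.name" );
    assert( strcmp( e.valuestr() , "sara" ) == 0 );
    assert( o.getFieldDotted( "user.missing" ).eoo() );   // no match: eoo() is true
    BSONObj sub = o.getObjectField( "user" );             // the embedded object itself
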
BSONElement BSONObj::getFieldDottedOrArray(const char *&name) const {
- const char *p = strchr(name, '.');
- string left;
- if ( p ) {
- left = string(name, p-name);
- name = p + 1;
- } else {
- left = string(name);
- name = name + strlen(name);
- }
- BSONElement sub = getField(left.c_str());
- if( sub.eoo() )
- return nullElement;
- else if( sub.type() == Array || strlen( name ) == 0 )
- return sub;
- else
- return sub.embeddedObject().getFieldDottedOrArray( name );
+ const char *p = strchr(name, '.');
+ string left;
+ if ( p ) {
+ left = string(name, p-name);
+ name = p + 1;
+ } else {
+ left = string(name);
+ name = name + strlen(name);
+ }
+ BSONElement sub = getField(left.c_str());
+ if ( sub.eoo() )
+ return nullElement;
+ else if ( sub.type() == Array || strlen( name ) == 0 )
+ return sub;
+ else
+ return sub.embeddedObject().getFieldDottedOrArray( name );
}
/* makes a new BSONObj with the fields specified in pattern.
fields returned in the order they appear in pattern.
if any field missing, you get back an empty object overall.
- n^2 implementation bad if pattern and object have lots
+ n^2 implementation bad if pattern and object have lots
of fields - normally pattern doesn't so should be fine.
*/
-BSONObj BSONObj::extractFieldsDotted(BSONObj pattern, BSONObjBuilder& b, const char *&nameWithinArray) const {
- nameWithinArray = "";
- BSONObjIterator i(pattern);
- while( i.more() ) {
- BSONElement e = i.next();
- if( e.eoo() )
- break;
- const char *name = e.fieldName();
- BSONElement x = getFieldDottedOrArray( name );
- if( x.eoo() ) {
- nameWithinArray = "";
- return BSONObj();
- } else if ( x.type() == Array ) {
- // NOTE: Currently set based on last array discovered.
- nameWithinArray = name;
- }
- b.appendAs(x, "");
- }
- return b.done();
+BSONObj BSONObj::extractFieldsDotted(BSONObj pattern, BSONObjBuilder& b, const char *&nameWithinArray) const {
+ nameWithinArray = "";
+ BSONObjIterator i(pattern);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ const char *name = e.fieldName();
+ BSONElement x = getFieldDottedOrArray( name );
+ if ( x.eoo() ) {
+ nameWithinArray = "";
+ return BSONObj();
+ } else if ( x.type() == Array ) {
+ // NOTE: Currently set based on last array discovered.
+ nameWithinArray = name;
+ }
+ b.appendAs(x, "");
+ }
+ return b.done();
}
-BSONObj BSONObj::extractFieldsUnDotted(BSONObj pattern) {
+BSONObj BSONObj::extractFieldsUnDotted(BSONObj pattern) {
BSONObjBuilder b;
- BSONObjIterator i(pattern);
- while( i.more() ) {
- BSONElement e = i.next();
- if( e.eoo() )
- break;
- BSONElement x = getField(e.fieldName());
- if( x.eoo() )
- return BSONObj();
- b.appendAs(x, "");
- }
- return b.doneAndDecouple();
+ BSONObjIterator i(pattern);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ BSONElement x = getField(e.fieldName());
+ if ( x.eoo() )
+ return BSONObj();
+ b.appendAs(x, "");
+ }
+ return b.doneAndDecouple();
}
-BSONObj BSONObj::extractFields(BSONObj& pattern) {
- BSONObjBuilder b(32); // scanandorder.h can make a zillion of these, so we start the allocation very small
- BSONObjIterator i(pattern);
- while( i.more() ) {
- BSONElement e = i.next();
- if( e.eoo() )
- break;
- BSONElement x = getFieldDotted(e.fieldName());
- if( x.eoo() )
- return BSONObj();
- b.append(x);
- }
- return b.doneAndDecouple();
+BSONObj BSONObj::extractFields(BSONObj& pattern) {
+ BSONObjBuilder b(32); // scanandorder.h can make a zillion of these, so we start the allocation very small
+ BSONObjIterator i(pattern);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ BSONElement x = getFieldDotted(e.fieldName());
+ if ( x.eoo() )
+ return BSONObj();
+ b.append(x);
+ }
+ return b.doneAndDecouple();
}
-int BSONObj::getIntField(const char *name) {
- BSONElement e = getField(name);
- return e.isNumber() ? (int) e.number() : INT_MIN;
+int BSONObj::getIntField(const char *name) {
+ BSONElement e = getField(name);
+ return e.isNumber() ? (int) e.number() : INT_MIN;
}
-bool BSONObj::getBoolField(const char *name) {
- BSONElement e = getField(name);
- return e.type() == Bool ? e.boolean() : false;
+bool BSONObj::getBoolField(const char *name) {
+ BSONElement e = getField(name);
+ return e.type() == Bool ? e.boolean() : false;
}
-const char * BSONObj::getStringField(const char *name) {
- BSONElement e = getField(name);
- return e.type() == String ? e.valuestr() : "";
+const char * BSONObj::getStringField(const char *name) {
+ BSONElement e = getField(name);
+ return e.type() == String ? e.valuestr() : "";
}
-BSONObj BSONObj::getObjectField(const char *name) const {
- BSONElement e = getField(name);
- BSONType t = e.type();
- return t == Object || t == Array ? e.embeddedObject() : BSONObj();
+BSONObj BSONObj::getObjectField(const char *name) const {
+ BSONElement e = getField(name);
+ BSONType t = e.type();
+ return t == Object || t == Array ? e.embeddedObject() : BSONObj();
}
int BSONObj::nFields() {
int n = 0;
- BSONObjIterator i(*this);
- while( i.more() ) {
- BSONElement e = i.next();
- if( e.eoo() )
- break;
- n++;
- }
- return n;
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ n++;
+ }
+ return n;
}
/* grab names of all the fields in this object */
int BSONObj::getFieldNames(set<string>& fields) {
- int n = 0;
- BSONObjIterator i(*this);
- while( i.more() ) {
- BSONElement e = i.next();
- if( e.eoo() )
- break;
- fields.insert(e.fieldName());
- n++;
- }
- return n;
+ int n = 0;
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ fields.insert(e.fieldName());
+ n++;
+ }
+ return n;
}
-/* note: addFields always adds _id even if not specified
+/* note: addFields always adds _id even if not specified
returns n added not counting _id unless requested.
*/
int BSONObj::addFields(BSONObj& from, set<string>& fields) {
- assert( details == 0 ); /* partial implementation for now... */
-
- BSONObjBuilder b;
-
- int N = fields.size();
- int n = 0;
- BSONObjIterator i(from);
- bool gotId = false;
- while( i.more() ) {
- BSONElement e = i.next();
- const char *fname = e.fieldName();
- if( fields.count(fname) ) {
- b.append(e);
- ++n;
- gotId = gotId || strcmp(fname, "_id")==0;
- if( n == N && gotId )
- break;
- } else if( strcmp(fname, "_id")==0 ) {
- b.append(e);
- gotId = true;
- if( n == N && gotId )
- break;
- }
- }
-
- if( n ) {
- int len;
- init( b.decouple(len), true );
- }
-
- return n;
+ assert( details == 0 ); /* partial implementation for now... */
+
+ BSONObjBuilder b;
+
+ int N = fields.size();
+ int n = 0;
+ BSONObjIterator i(from);
+ bool gotId = false;
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char *fname = e.fieldName();
+ if ( fields.count(fname) ) {
+ b.append(e);
+ ++n;
+ gotId = gotId || strcmp(fname, "_id")==0;
+ if ( n == N && gotId )
+ break;
+ } else if ( strcmp(fname, "_id")==0 ) {
+ b.append(e);
+ gotId = true;
+ if ( n == N && gotId )
+ break;
+ }
+ }
+
+ if ( n ) {
+ int len;
+ init( b.decouple(len), true );
+ }
+
+ return n;
}
ostream& operator<<( ostream &s, const BSONObj &o ) {
- return s << o.toString();
+ return s << o.toString();
}
/*-- test things ----------------------------------------------------*/
#pragma pack(push,1)
-struct MaxKeyData {
- MaxKeyData() { totsize=7; maxkey=MaxKey; name=0; eoo=EOO; }
- int totsize;
- char maxkey;
- char name;
- char eoo;
+struct MaxKeyData {
+ MaxKeyData() {
+ totsize=7;
+ maxkey=MaxKey;
+ name=0;
+ eoo=EOO;
+ }
+ int totsize;
+ char maxkey;
+ char name;
+ char eoo;
} maxkeydata;
BSONObj maxKey((const char *) &maxkeydata);
-struct MinKeyData {
- MinKeyData() { totsize=7; minkey=MinKey; name=0; eoo=EOO; }
- int totsize;
- char minkey;
- char name;
- char eoo;
+struct MinKeyData {
+ MinKeyData() {
+ totsize=7;
+ minkey=MinKey;
+ name=0;
+ eoo=EOO;
+ }
+ int totsize;
+ char minkey;
+ char name;
+ char eoo;
} minkeydata;
BSONObj minKey((const char *) &minkeydata);
struct JSObj0 {
- JSObj0() { totsize = 5; eoo = EOO; }
- int totsize;
- char eoo;
+ JSObj0() {
+ totsize = 5;
+ eoo = EOO;
+ }
+ int totsize;
+ char eoo;
} js0;
#pragma pack(pop)
-BSONElement::BSONElement() {
- data = &js0.eoo;
- fieldNameSize = 0;
- totalSize = -1;
+BSONElement::BSONElement() {
+ data = &js0.eoo;
+ fieldNameSize = 0;
+ totalSize = -1;
}
#pragma pack(push,1)
struct EmptyObject {
- EmptyObject() { len = 5; jstype = EOO; }
- int len;
- char jstype;
+ EmptyObject() {
+ len = 5;
+ jstype = EOO;
+ }
+ int len;
+ char jstype;
} emptyObject;
#pragma pack(pop)
BSONObj emptyObj((char *) &emptyObject);
-struct BsonUnitTest : public UnitTest {
- void testRegex() {
+struct BsonUnitTest : public UnitTest {
+ void testRegex() {
BSONObjBuilder b;
b.appendRegex("x", "foo");
BSONObj o = b.done();
@@ -617,7 +642,7 @@ struct BsonUnitTest : public UnitTest {
assert( o != p );
assert( o < p );
}
- void run() {
+ void run() {
testRegex();
BSONObjBuilder A,B,C;
A.appendInt("x", 2);
diff --git a/db/jsobj.h b/db/jsobj.h
index 8e0fa247cbc..a7c2f85c1a5 100644
--- a/db/jsobj.h
+++ b/db/jsobj.h
@@ -8,16 +8,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -36,17 +36,17 @@ class BSONObjBuilder;
#pragma pack(push,1)
-/* BinData = binary data types.
+/* BinData = binary data types.
EOO = end of object
*/
-enum BSONType {MinKey=-1, EOO=0, NumberDouble=1, String=2, Object=3, Array=4, BinData=5,
- Undefined=6, jstOID=7, Bool=8, Date=9 , jstNULL=10, RegEx=11 ,
- DBRef=12, Code=13, Symbol=14, CodeWScope=15 ,
- NumberInt = 16,
- JSTypeMax=16,
- MaxKey=127
+enum BSONType {MinKey=-1, EOO=0, NumberDouble=1, String=2, Object=3, Array=4, BinData=5,
+ Undefined=6, jstOID=7, Bool=8, Date=9 , jstNULL=10, RegEx=11 ,
+ DBRef=12, Code=13, Symbol=14, CodeWScope=15 ,
+ NumberInt = 16,
+ JSTypeMax=16,
+ MaxKey=127
-};
+ };
/* subtypes of BinData.
bdtCustom and above are ones that the JS compiler understands, but are
@@ -54,14 +54,18 @@ enum BSONType {MinKey=-1, EOO=0, NumberDouble=1, String=2, Object=3, Array=4, Bi
*/
enum BinDataType { Function=1, ByteArray=2, bdtCustom=128 };
-/* Object id's are optional for BSONObjects.
+/* Object id's are optional for BSONObjects.
When present they should be the first object member added.
*/
-struct OID {
- long long a;
- unsigned b;
- bool operator==(const OID& r) { return a==r.a&&b==r.b; }
- void out(){ cout << hex << a << hex << b << endl; };
+struct OID {
+ long long a;
+ unsigned b;
+ bool operator==(const OID& r) {
+ return a==r.a&&b==r.b;
+ }
+ void out() {
+ cout << hex << a << hex << b << endl;
+ };
};
/* marshalled js object format:
@@ -91,511 +95,557 @@ struct OID {
#pragma pack(pop)
-/* <type><fieldName ><value>
+/* <type><fieldName ><value>
-------- size() ------------
-fieldNameSize-
value()
type()
*/
class BSONElement {
- friend class BSONObjIterator;
- friend class BSONObj;
+ friend class BSONObjIterator;
+ friend class BSONObj;
public:
- string toString() const;
- BSONType type() const { return (BSONType) *data; }
- bool eoo() const { return type() == EOO; }
- int size() const;
+ string toString() const;
+ BSONType type() const {
+ return (BSONType) *data;
+ }
+ bool eoo() const {
+ return type() == EOO;
+ }
+ int size() const;
- // wrap this element up as a singleton object.
- BSONObj wrap();
+ // wrap this element up as a singleton object.
+ BSONObj wrap();
- const char * fieldName() const {
- if( eoo() ) return ""; // no fieldname for it.
- return data + 1;
- }
+ const char * fieldName() const {
+ if ( eoo() ) return ""; // no fieldname for it.
+ return data + 1;
+ }
- // raw data be careful:
- const char * value() const { return (data + fieldNameSize + 1); }
- int valuesize() const { return size() - fieldNameSize - 1; }
+ // raw data be careful:
+ const char * value() const {
+ return (data + fieldNameSize + 1);
+ }
+ int valuesize() const {
+ return size() - fieldNameSize - 1;
+ }
- bool isBoolean() const { return type() == Bool; }
- bool boolean() const { return *value() ? true : false; }
+ bool isBoolean() const {
+ return type() == Bool;
+ }
+ bool boolean() const {
+ return *value() ? true : false;
+ }
- unsigned long long date() const { return *((unsigned long long*) value()); }
- //double& number() { return *((double *) value()); }
+ unsigned long long date() const {
+ return *((unsigned long long*) value());
+ }
+ //double& number() { return *((double *) value()); }
- bool isNumber() const { return type() == NumberDouble || type() == NumberInt; }
- void setNumber(double d) {
- if( type() == NumberDouble ) *((double *) value()) = d;
- else if( type() == NumberInt ) *((int *) value()) = (int) d;
- }
- double number() const {
- if( type() == NumberDouble ) return *((double *) value());
- if( type() == NumberInt ) return *((int *) value());
- return 0;
- }
- OID& oid() const { return *((OID*) value()); }
+ bool isNumber() const {
+ return type() == NumberDouble || type() == NumberInt;
+ }
+ void setNumber(double d) {
+ if ( type() == NumberDouble ) *((double *) value()) = d;
+ else if ( type() == NumberInt ) *((int *) value()) = (int) d;
+ }
+ double number() const {
+ if ( type() == NumberDouble ) return *((double *) value());
+ if ( type() == NumberInt ) return *((int *) value());
+ return 0;
+ }
+ OID& oid() const {
+ return *((OID*) value());
+ }
- // for strings
- int valuestrsize() const {
- return *((int *) value());
- }
+ // for strings
+ int valuestrsize() const {
+ return *((int *) value());
+ }
- // for objects the size *includes* the size of the size field
- int objsize() const {
- return *((int *) value());
- }
+ // for objects the size *includes* the size of the size field
+ int objsize() const {
+ return *((int *) value());
+ }
- // for strings. also gives you start of the real data for an embedded object
- const char * valuestr() const { return value() + 4; }
+ // for strings. also gives you start of the real data for an embedded object
+ const char * valuestr() const {
+ return value() + 4;
+ }
- const char *valuestrsafe() const {
- return type() == String ? valuestr() : "";
- }
+ const char *valuestrsafe() const {
+ return type() == String ? valuestr() : "";
+ }
- const char * codeWScopeCode() const { return value() + 8; }
- const char * codeWScopeScopeData() const {
- // TODO fix
- return codeWScopeCode() + strlen( codeWScopeCode() ) + 1;
- }
-
- BSONObj embeddedObject() const;
+ const char * codeWScopeCode() const {
+ return value() + 8;
+ }
+ const char * codeWScopeScopeData() const {
+ // TODO fix
+ return codeWScopeCode() + strlen( codeWScopeCode() ) + 1;
+ }
+
+ BSONObj embeddedObject() const;
/* uassert if not an object */
- BSONObj embeddedObjectUserCheck();
+ BSONObj embeddedObjectUserCheck();
- const char *regex() const { assert(type() == RegEx); return value(); }
- const char *regexFlags() const {
- const char *p = regex();
- return p + strlen(p) + 1;
- }
+ const char *regex() const {
+ assert(type() == RegEx);
+ return value();
+ }
+ const char *regexFlags() const {
+ const char *p = regex();
+ return p + strlen(p) + 1;
+ }
- /* like operator== but doesn't check the fieldname,
- just the value.
- */
+ /* like operator== but doesn't check the fieldname,
+ just the value.
+ */
bool valuesEqual(const BSONElement& r) const {
- if( isNumber() )
+ if ( isNumber() )
return number() == r.number() && r.isNumber();
- bool match= valuesize() == r.valuesize() &&
- memcmp(value(),r.value(),valuesize()) == 0;
- return match;
- // todo: make "0" == 0.0, undefined==null
- }
+ bool match= valuesize() == r.valuesize() &&
+ memcmp(value(),r.value(),valuesize()) == 0;
+ return match;
+ // todo: make "0" == 0.0, undefined==null
+ }
- bool operator==(const BSONElement& r) const {
- if( strcmp(fieldName(), r.fieldName()) != 0 )
- return false;
- return valuesEqual(r);
-/*
- int sz = size();
- return sz == r.size() &&
- memcmp(data, r.data, sz) == 0;
-*/
- }
+ bool operator==(const BSONElement& r) const {
+ if ( strcmp(fieldName(), r.fieldName()) != 0 )
+ return false;
+ return valuesEqual(r);
+ /*
+ int sz = size();
+ return sz == r.size() &&
+ memcmp(data, r.data, sz) == 0;
+ */
+ }
+
+
+ /* <0: l<r. 0:l==r. >0:l>r
+ order by type, field name, and field value.
+ If considerFieldName is true, pay attention to the field name.
+ */
+ int woCompare( const BSONElement &e, bool considerFieldName = true ) const;
+
+ const char * rawdata() {
+ return data;
+ }
-
- /* <0: l<r. 0:l==r. >0:l>r
- order by type, field name, and field value.
- If considerFieldName is true, pay attention to the field name.
- */
- int woCompare( const BSONElement &e, bool considerFieldName = true ) const;
-
- const char * rawdata() { return data; }
+ int getGtLtOp() const;
- int getGtLtOp() const;
-
- BSONElement();
+ BSONElement();
private:
- BSONElement(const char *d) : data(d) {
- fieldNameSize = eoo() ? 0 : strlen(fieldName()) + 1;
- totalSize = -1;
- }
- const char *data;
- int fieldNameSize;
- int totalSize; /* caches the computed size */
+ BSONElement(const char *d) : data(d) {
+ fieldNameSize = eoo() ? 0 : strlen(fieldName()) + 1;
+ totalSize = -1;
+ }
+ const char *data;
+ int fieldNameSize;
+ int totalSize; /* caches the computed size */
};
/* l and r MUST have same type when called: check that first. */
int compareElementValues(const BSONElement& l, const BSONElement& r);
int getGtLtOp(BSONElement& e);
-
+
class BSONObj {
- friend class BSONObjIterator;
- class Details {
- public:
- ~Details() {
- // note refCount means two different things (thus the assert here)
- assert(refCount <= 0);
- if (owned()) {
- free((void *)_objdata);
- }
- _objdata = 0;
- }
- const char *_objdata;
- int _objsize;
- int refCount; // -1 == don't free (we don't "own" the buffer)
- bool owned() { return refCount >= 0; }
- } *details;
- void init(const char *data, bool ifree) {
- details = new Details();
- details->_objdata = data;
- details->_objsize = *((int*) data);
- assert( details->_objsize > 0 );
- assert( details->_objsize <= 1024 * 1024 * 16 );
- details->refCount = ifree ? 1 : -1;
- }
+ friend class BSONObjIterator;
+ class Details {
+ public:
+ ~Details() {
+ // note refCount means two different things (thus the assert here)
+ assert(refCount <= 0);
+ if (owned()) {
+ free((void *)_objdata);
+ }
+ _objdata = 0;
+ }
+ const char *_objdata;
+ int _objsize;
+ int refCount; // -1 == don't free (we don't "own" the buffer)
+ bool owned() {
+ return refCount >= 0;
+ }
+ } *details;
+ void init(const char *data, bool ifree) {
+ details = new Details();
+ details->_objdata = data;
+ details->_objsize = *((int*) data);
+ assert( details->_objsize > 0 );
+ assert( details->_objsize <= 1024 * 1024 * 16 );
+ details->refCount = ifree ? 1 : -1;
+ }
public:
- explicit BSONObj(const char *msgdata, bool ifree = false) {
- init(msgdata, ifree);
- }
- BSONObj(Record *r);
- BSONObj() : details(0) { }
- ~BSONObj() {
- if( details ) {
- if( --details->refCount <= 0 )
- delete details;
- details = 0;
- }
- }
+ explicit BSONObj(const char *msgdata, bool ifree = false) {
+ init(msgdata, ifree);
+ }
+ BSONObj(Record *r);
+ BSONObj() : details(0) { }
+ ~BSONObj() {
+ if ( details ) {
+ if ( --details->refCount <= 0 )
+ delete details;
+ details = 0;
+ }
+ }
- void appendSelfToBufBuilder(BufBuilder& b) {
- assert( objsize() );
- b.append((void *) objdata(), objsize());
- }
+ void appendSelfToBufBuilder(BufBuilder& b) {
+ assert( objsize() );
+ b.append((void *) objdata(), objsize());
+ }
- /* switch the buffer's ownership to us. */
- void iWillFree() {
- assert( !details->owned() );
- details->refCount = 1;
- }
+ /* switch the buffer's ownership to us. */
+ void iWillFree() {
+ assert( !details->owned() );
+ details->refCount = 1;
+ }
- string toString() const;
- /* note: addFields always adds _id even if not specified */
- int addFields(BSONObj& from, set<string>& fields); /* returns n added */
+ string toString() const;
+ /* note: addFields always adds _id even if not specified */
+ int addFields(BSONObj& from, set<string>& fields); /* returns n added */
/* returns # of top level fields in the object
- note: iterates to count the fields
+ note: iterates to count the fields
*/
int nFields();
/* adds the field names to the fields set. does NOT clear it (appends). */
- int getFieldNames(set<string>& fields);
+ int getFieldNames(set<string>& fields);
- /* return has eoo() true if no match
- supports "." notation to reach into embedded objects
- */
- BSONElement getFieldDotted(const char *name) const;
- // Like above, but returns first array encountered while traversing the
- // dotted fields of name. The name variable is updated to represent field
- // names with respect to the returned element.
- BSONElement getFieldDottedOrArray(const char *&name) const;
+ /* return has eoo() true if no match
+ supports "." notation to reach into embedded objects
+ */
+ BSONElement getFieldDotted(const char *name) const;
+ // Like above, but returns first array encountered while traversing the
+ // dotted fields of name. The name variable is updated to represent field
+ // names with respect to the returned element.
+ BSONElement getFieldDottedOrArray(const char *&name) const;
- BSONElement getField(const char *name) const; /* return has eoo() true if no match */
+ BSONElement getField(const char *name) const; /* return has eoo() true if no match */
- // returns "" if DNE or wrong type
- const char * getStringField(const char *name);
+ // returns "" if DNE or wrong type
+ const char * getStringField(const char *name);
- BSONObj getObjectField(const char *name) const;
+ BSONObj getObjectField(const char *name) const;
int getIntField(const char *name); // INT_MIN if not present
- bool getBoolField(const char *name);
+ bool getBoolField(const char *name);
- /* makes a new BSONObj with the fields specified in pattern.
+ /* makes a new BSONObj with the fields specified in pattern.
fields returned in the order they appear in pattern.
- if any field missing, you get back an empty object overall.
- */
- // sets element field names to empty string
- // If an array is encountered while scanning the dotted names in pattern,
- // that array is added to the returned obj, rather than any subobjects
- // referenced within the array. The variable nameWithinArray is set to the
- // name of the requested field within the returned array.
- BSONObj extractFieldsDotted(BSONObj pattern, BSONObjBuilder& b, const char *&nameWithinArray) const; // this version, builder owns the returned obj buffer
- // sets element field names to empty string
+ if any field missing, you get back an empty object overall.
+ */
+ // sets element field names to empty string
+ // If an array is encountered while scanning the dotted names in pattern,
+ // that array is added to the returned obj, rather than any subobjects
+ // referenced within the array. The variable nameWithinArray is set to the
+ // name of the requested field within the returned array.
+ BSONObj extractFieldsDotted(BSONObj pattern, BSONObjBuilder& b, const char *&nameWithinArray) const; // this version, builder owns the returned obj buffer
+ // sets element field names to empty string
BSONObj extractFieldsUnDotted(BSONObj pattern);
- // returns elements with original field names
- BSONObj extractFields(BSONObj &pattern);
+ // returns elements with original field names
+ BSONObj extractFields(BSONObj &pattern);
- const char *objdata() const { return details->_objdata; }
- int objsize() const { return details ? details->_objsize : 0; } // includes the embedded size field
- bool isEmpty() const { return objsize() <= 5; }
+ const char *objdata() const {
+ return details->_objdata;
+ }
+ int objsize() const {
+ return details ? details->_objsize : 0; // includes the embedded size field
+ }
+ bool isEmpty() const {
+ return objsize() <= 5;
+ }
/* sigh...details == 0 is such a pain we have to eliminate that possibility */
void validateEmpty();
- void dump() {
+ void dump() {
cout << hex;
const char *p = objdata();
- for( int i = 0; i < objsize(); i++ ) {
+ for ( int i = 0; i < objsize(); i++ ) {
cout << i << '\t' << (unsigned) *p;
- if( *p >= 'A' && *p <= 'z' )
+ if ( *p >= 'A' && *p <= 'z' )
cout << '\t' << *p;
cout << endl;
p++;
}
}
- bool operator<(const BSONObj& r) const { return woCompare(r) < 0; }
+ bool operator<(const BSONObj& r) const {
+ return woCompare(r) < 0;
+ }
- /* <0: l<r. 0:l==r. >0:l>r
- wo='well ordered'. fields must be in same order in each object.
- */
- int woCompare(const BSONObj& r, bool considerFieldName=true) const;
+ /* <0: l<r. 0:l==r. >0:l>r
+ wo='well ordered'. fields must be in same order in each object.
+ */
+ int woCompare(const BSONObj& r, bool considerFieldName=true) const;
- /* note this is "shallow equality" -- ints and doubles won't match. for a
+ /* note this is "shallow equality" -- ints and doubles won't match. for a
deep equality test use woCompare (which is slower).
*/
- bool woEqual(const BSONObj& r) const {
- int os = objsize();
- if( os == r.objsize() ) {
+ bool woEqual(const BSONObj& r) const {
+ int os = objsize();
+ if ( os == r.objsize() ) {
return (os == 0 || memcmp(objdata(),r.objdata(),os)==0);
}
return false;
- }
- bool operator==(const BSONObj& r) const {
- return this->woEqual(r);
- }
- bool operator!=(const BSONObj& r) const {
- return !operator==( r );
- }
+ }
+ bool operator==(const BSONObj& r) const {
+ return this->woEqual(r);
+ }
+ bool operator!=(const BSONObj& r) const {
+ return !operator==( r );
+ }
- BSONElement firstElement() const {
- return BSONElement(objdata() + 4);
- }
- BSONElement findElement(const char *name);
- BSONElement findElement(string name) { return findElement(name.c_str()); }
- bool hasElement(const char *name);
-
- OID* getOID() {
- BSONElement e = firstElement();
- if( e.type() != jstOID )
- return 0;
- return &e.oid();
- }
+ BSONElement firstElement() const {
+ return BSONElement(objdata() + 4);
+ }
+ BSONElement findElement(const char *name);
+ BSONElement findElement(string name) {
+ return findElement(name.c_str());
+ }
+ bool hasElement(const char *name);
- BSONObj(const BSONObj& r) {
- if( r.details == 0 )
- details = 0;
- else if( r.details->owned() ) {
- details = r.details;
- details->refCount++;
- }
- else {
- details = new Details(*r.details);
- }
- }
- BSONObj& operator=(const BSONObj& r) {
- if( details && details->owned() ) {
- if( --details->refCount == 0 )
- delete details;
- }
-
- if( r.details == 0 )
- details = 0;
- else if( r.details->owned() ) {
- details = r.details;
- details->refCount++;
- }
- else {
- details = new Details(*r.details);
- }
- return *this;
- }
+ OID* getOID() {
+ BSONElement e = firstElement();
+ if ( e.type() != jstOID )
+ return 0;
+ return &e.oid();
+ }
- /* makes a copy of the object. Normally, a jsobj points to data "owned"
- by something else. this is a useful way to get your own copy of the buffer
- data (which is freed when the new jsobj destructs).
- */
- BSONObj copy();
-
- int hash() const {
- unsigned x = 0;
- const char *p = objdata();
- for( int i = 0; i < objsize(); i++ )
- x = x * 131 + p[i];
- return (x & 0x7fffffff) | 0x8000000; // must be > 0
- }
+ BSONObj(const BSONObj& r) {
+ if ( r.details == 0 )
+ details = 0;
+ else if ( r.details->owned() ) {
+ details = r.details;
+ details->refCount++;
+ }
+ else {
+ details = new Details(*r.details);
+ }
+ }
+ BSONObj& operator=(const BSONObj& r) {
+ if ( details && details->owned() ) {
+ if ( --details->refCount == 0 )
+ delete details;
+ }
+
+ if ( r.details == 0 )
+ details = 0;
+ else if ( r.details->owned() ) {
+ details = r.details;
+ details->refCount++;
+ }
+ else {
+ details = new Details(*r.details);
+ }
+ return *this;
+ }
+
+ /* makes a copy of the object. Normally, a jsobj points to data "owned"
+ by something else. this is a useful way to get your own copy of the buffer
+ data (which is freed when the new jsobj destructs).
+ */
+ BSONObj copy();
+
+ int hash() const {
+ unsigned x = 0;
+ const char *p = objdata();
+ for ( int i = 0; i < objsize(); i++ )
+ x = x * 131 + p[i];
+ return (x & 0x7fffffff) | 0x8000000; // must be > 0
+ }
// true unless corrupt
- bool valid() const;
+ bool valid() const;
};
ostream& operator<<( ostream &s, const BSONObj &o );
-
-class BSONObjBuilder {
+
+class BSONObjBuilder {
public:
- BSONObjBuilder(int initsize=512) : b(initsize) { b.skip(4); /*leave room for size field*/ }
+ BSONObjBuilder(int initsize=512) : b(initsize) {
+ b.skip(4); /*leave room for size field*/
+ }
- /* add all the fields from the object specified to this object */
- void appendElements(BSONObj x);
+ /* add all the fields from the object specified to this object */
+ void appendElements(BSONObj x);
- void append(BSONElement& e) {
- assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
- b.append((void*) e.rawdata(), e.size());
- }
+ void append(BSONElement& e) {
+ assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ b.append((void*) e.rawdata(), e.size());
+ }
- /* append an element but with a new name */
- void appendAs(const BSONElement& e, const char *as) {
- b.append((char) e.type());
- b.append(as);
- b.append((void *) e.value(), e.valuesize());
- }
+ /* append an element but with a new name */
+ void appendAs(const BSONElement& e, const char *as) {
+ b.append((char) e.type());
+ b.append(as);
+ b.append((void *) e.value(), e.valuesize());
+ }
- /* add a subobject as a member */
- void append(const char *fieldName, BSONObj subObj) {
- b.append((char) Object);
- b.append(fieldName);
- b.append((void *) subObj.objdata(), subObj.objsize());
- }
+ /* add a subobject as a member */
+ void append(const char *fieldName, BSONObj subObj) {
+ b.append((char) Object);
+ b.append(fieldName);
+ b.append((void *) subObj.objdata(), subObj.objsize());
+ }
- void appendBool(const char *fieldName, int val) {
- b.append((char) Bool);
- b.append(fieldName);
- b.append((char) (val?1:0));
- }
- void appendInt(const char *fieldName, int n) {
- b.append((char) NumberInt);
- b.append(fieldName);
- b.append(n);
- }
- void append(const char *fieldName, double n) {
- b.append((char) NumberDouble);
- b.append(fieldName);
- b.append(n);
- }
- void appendOID(const char *fieldName, OID *oid = 0) {
- b.append((char) jstOID);
- b.append(fieldName);
- b.append((long long) (oid ? oid->a : 0));
- b.append((unsigned) (oid ? oid->b : 0));
- }
- void appendDate(const char *fieldName, unsigned long long dt) {
- b.append((char) Date);
- b.append(fieldName);
- b.append(dt);
- }
- void appendRegex(const char *fieldName, const char *regex, const char *options = "") {
- b.append((char) RegEx);
- b.append(fieldName);
- b.append(regex);
- b.append(options);
- }
- void append(const char *fieldName, const char *str) {
- b.append((char) String);
- b.append(fieldName);
- b.append((int) strlen(str)+1);
- b.append(str);
- }
- void append(const char *fieldName, string str) {
- append(fieldName, str.c_str());
- }
- // Append an element that is less than all other keys.
- void appendMinKey( const char *fieldName ) {
- b.append( (char) MinKey );
- b.append( fieldName );
- }
- // Append an element that is greater than all other keys.
- void appendMaxKey( const char *fieldName ) {
- b.append( (char) MaxKey );
- b.append( fieldName );
- }
-
- template < class T >
- void append( const char *fieldName, const vector< T >& vals ) {
- BSONObjBuilder arrBuilder;
- for( unsigned int i = 0; i < vals.size(); ++i )
- arrBuilder.append( numStr( i ).c_str(), vals[ i ] );
- marshalArray( fieldName, arrBuilder.done() );
- }
-
- void appendIntArray( const char *fieldName, const vector< int >& vals ) {
- BSONObjBuilder arrBuilder;
- for( unsigned i = 0; i < vals.size(); ++i )
- arrBuilder.appendInt( numStr( i ).c_str(), vals[ i ] );
- marshalArray( fieldName, arrBuilder.done() );
- }
+ void appendBool(const char *fieldName, int val) {
+ b.append((char) Bool);
+ b.append(fieldName);
+ b.append((char) (val?1:0));
+ }
+ void appendInt(const char *fieldName, int n) {
+ b.append((char) NumberInt);
+ b.append(fieldName);
+ b.append(n);
+ }
+ void append(const char *fieldName, double n) {
+ b.append((char) NumberDouble);
+ b.append(fieldName);
+ b.append(n);
+ }
+ void appendOID(const char *fieldName, OID *oid = 0) {
+ b.append((char) jstOID);
+ b.append(fieldName);
+ b.append((long long) (oid ? oid->a : 0));
+ b.append((unsigned) (oid ? oid->b : 0));
+ }
+ void appendDate(const char *fieldName, unsigned long long dt) {
+ b.append((char) Date);
+ b.append(fieldName);
+ b.append(dt);
+ }
+ void appendRegex(const char *fieldName, const char *regex, const char *options = "") {
+ b.append((char) RegEx);
+ b.append(fieldName);
+ b.append(regex);
+ b.append(options);
+ }
+ void append(const char *fieldName, const char *str) {
+ b.append((char) String);
+ b.append(fieldName);
+ b.append((int) strlen(str)+1);
+ b.append(str);
+ }
+ void append(const char *fieldName, string str) {
+ append(fieldName, str.c_str());
+ }
+ // Append an element that is less than all other keys.
+ void appendMinKey( const char *fieldName ) {
+ b.append( (char) MinKey );
+ b.append( fieldName );
+ }
+ // Append an element that is greater than all other keys.
+ void appendMaxKey( const char *fieldName ) {
+ b.append( (char) MaxKey );
+ b.append( fieldName );
+ }
- /* BSONObj will free the buffer when it is finished. */
- BSONObj doneAndDecouple() {
- int l;
- return BSONObj(decouple(l), true);
- }
+ template < class T >
+ void append( const char *fieldName, const vector< T >& vals ) {
+ BSONObjBuilder arrBuilder;
+ for ( unsigned int i = 0; i < vals.size(); ++i )
+ arrBuilder.append( numStr( i ).c_str(), vals[ i ] );
+ marshalArray( fieldName, arrBuilder.done() );
+ }
- /* this version, jsobjbuilder still frees the jsobj
- when the builder goes out of scope. use it this way
- by default, that's simplest.
- */
- BSONObj done() {
- return BSONObj(_done());
- }
+ void appendIntArray( const char *fieldName, const vector< int >& vals ) {
+ BSONObjBuilder arrBuilder;
+ for ( unsigned i = 0; i < vals.size(); ++i )
+ arrBuilder.appendInt( numStr( i ).c_str(), vals[ i ] );
+ marshalArray( fieldName, arrBuilder.done() );
+ }
+
+ /* BSONObj will free the buffer when it is finished. */
+ BSONObj doneAndDecouple() {
+ int l;
+ return BSONObj(decouple(l), true);
+ }
+
+ /* this version, jsobjbuilder still frees the jsobj
+ when the builder goes out of scope. use it this way
+ by default, that's simplest.
+ */
+ BSONObj done() {
+ return BSONObj(_done());
+ }
- /* assume ownership of the buffer - you must then free it (with free()) */
- char* decouple(int& l) {
- char *x = _done();
+ /* assume ownership of the buffer - you must then free it (with free()) */
+ char* decouple(int& l) {
+ char *x = _done();
assert( x );
- l = b.len();
- b.decouple();
- return x;
- }
- void decouple() { b.decouple(); } // post done() call version. be sure jsobj frees...
+ l = b.len();
+ b.decouple();
+ return x;
+ }
+ void decouple() {
+ b.decouple(); // post done() call version. be sure jsobj frees...
+ }
private:
- // Append the provided arr object as an array.
- void marshalArray( const char *fieldName, const BSONObj &arr ) {
- b.append( (char) Array );
- b.append( fieldName );
- b.append( (void *) arr.objdata(), arr.objsize() );
- }
+ // Append the provided arr object as an array.
+ void marshalArray( const char *fieldName, const BSONObj &arr ) {
+ b.append( (char) Array );
+ b.append( fieldName );
+ b.append( (void *) arr.objdata(), arr.objsize() );
+ }
- string numStr( int i ) const {
- stringstream o;
- o << i;
- return o.str();
- }
-
- char* _done() {
- b.append((char) EOO);
- char *data = b.buf();
- *((int*)data) = b.len();
- return data;
- }
+ string numStr( int i ) const {
+ stringstream o;
+ o << i;
+ return o.str();
+ }
+
+ char* _done() {
+ b.append((char) EOO);
+ char *data = b.buf();
+ *((int*)data) = b.len();
+ return data;
+ }
- BufBuilder b;
+ BufBuilder b;
};
-/* iterator for a BSONObj
+/* iterator for a BSONObj
- Note each BSONObj ends with an EOO element: so you will get more() on an empty
+ Note each BSONObj ends with an EOO element: so you will get more() on an empty
object, although next().eoo() will be true.
*/
class BSONObjIterator {
public:
- BSONObjIterator(const BSONObj& jso) {
+ BSONObjIterator(const BSONObj& jso) {
int sz = jso.objsize();
- if( sz == 0 ) {
- pos = theend = 0; return;
+ if ( sz == 0 ) {
+ pos = theend = 0;
+ return;
}
- pos = jso.objdata() + 4;
- theend = jso.objdata() + sz;
- }
- bool more() { return pos < theend; }
- BSONElement next() {
- BSONElement e(pos);
- pos += e.size();
- return e;
- }
+ pos = jso.objdata() + 4;
+ theend = jso.objdata() + sz;
+ }
+ bool more() {
+ return pos < theend;
+ }
+ BSONElement next() {
+ BSONElement e(pos);
+ pos += e.size();
+ return e;
+ }
private:
- const char *pos;
- const char *theend;
+ const char *pos;
+ const char *theend;
};
-/* iterator a BSONObj which is an array, in array order.
+/* iterator for a BSONObj which is an array, in array order.
class JSArrayIter {
public:
BSONObjIterator(const BSONObj& jso) {
...
}
- bool more() { return ... }
+ bool more() { return ... }
BSONElement next() {
...
}
@@ -606,97 +656,102 @@ public:
extern BSONObj maxKey;
extern BSONObj minKey;
-
+
/*- just for testing -- */
#pragma pack(push,1)
struct JSObj1 {
- JSObj1() {
- totsize=sizeof(JSObj1);
- n = NumberDouble; strcpy_s(nname, 5, "abcd"); N = 3.1;
- s = String; strcpy_s(sname, 7, "abcdef"); slen = 10;
- strcpy_s(sval, 10, "123456789"); eoo = EOO;
- }
- unsigned totsize;
+ JSObj1() {
+ totsize=sizeof(JSObj1);
+ n = NumberDouble;
+ strcpy_s(nname, 5, "abcd");
+ N = 3.1;
+ s = String;
+ strcpy_s(sname, 7, "abcdef");
+ slen = 10;
+ strcpy_s(sval, 10, "123456789");
+ eoo = EOO;
+ }
+ unsigned totsize;
- char n;
- char nname[5];
- double N;
+ char n;
+ char nname[5];
+ double N;
- char s;
- char sname[7];
- unsigned slen;
- char sval[10];
+ char s;
+ char sname[7];
+ unsigned slen;
+ char sval[10];
- char eoo;
+ char eoo;
};
#pragma pack(pop)
extern JSObj1 js1;
-inline BSONObj BSONElement::embeddedObjectUserCheck() {
- uassert( "invalid parameter: expected an object", type()==Object || type()==Array );
- return BSONObj(value());
+inline BSONObj BSONElement::embeddedObjectUserCheck() {
+ uassert( "invalid parameter: expected an object", type()==Object || type()==Array );
+ return BSONObj(value());
}
-inline BSONObj BSONElement::embeddedObject() const {
- assert( type()==Object || type()==Array );
- return BSONObj(value());
+inline BSONObj BSONElement::embeddedObject() const {
+ assert( type()==Object || type()==Array );
+ return BSONObj(value());
}
-inline BSONObj BSONObj::copy() {
- if( isEmpty() )
- return *this;
+inline BSONObj BSONObj::copy() {
+ if ( isEmpty() )
+ return *this;
- char *p = (char*) malloc(objsize());
- memcpy(p, objdata(), objsize());
- return BSONObj(p, true);
+ char *p = (char*) malloc(objsize());
+ memcpy(p, objdata(), objsize());
+ return BSONObj(p, true);
}
// wrap this element up as a singleton object.
-inline BSONObj BSONElement::wrap() {
- BSONObjBuilder b;
- b.append(*this);
- return b.doneAndDecouple();
+inline BSONObj BSONElement::wrap() {
+ BSONObjBuilder b;
+ b.append(*this);
+ return b.doneAndDecouple();
}
-inline bool BSONObj::hasElement(const char *name) {
- if( !isEmpty() ) {
- BSONObjIterator it(*this);
- while( it.more() ) {
- BSONElement e = it.next();
- if( strcmp(name, e.fieldName()) == 0 )
- return true;
- }
- }
+inline bool BSONObj::hasElement(const char *name) {
+ if ( !isEmpty() ) {
+ BSONObjIterator it(*this);
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ if ( strcmp(name, e.fieldName()) == 0 )
+ return true;
+ }
+ }
return false;
}
-inline BSONElement BSONObj::findElement(const char *name) {
- if( !isEmpty() ) {
- BSONObjIterator it(*this);
- while( it.more() ) {
- BSONElement e = it.next();
- if( strcmp(name, e.fieldName()) == 0 )
- return e;
- }
- }
- return BSONElement();
+inline BSONElement BSONObj::findElement(const char *name) {
+ if ( !isEmpty() ) {
+ BSONObjIterator it(*this);
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ if ( strcmp(name, e.fieldName()) == 0 )
+ return e;
+ }
+ }
+ return BSONElement();
}
/* add all the fields from the object specified to this object */
-inline void BSONObjBuilder::appendElements(BSONObj x) {
- BSONObjIterator it(x);
- while( it.more() ) {
- BSONElement e = it.next();
- if( e.eoo() ) break;
- append(e);
- }
+inline void BSONObjBuilder::appendElements(BSONObj x) {
+ BSONObjIterator it(x);
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ if ( e.eoo() ) break;
+ append(e);
+ }
}
extern BSONObj emptyObj;
-
-inline void BSONObj::validateEmpty() {
- if( details == 0 )
+
+inline void BSONObj::validateEmpty() {
+ if ( details == 0 )
*this = emptyObj;
}
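
Since the jsobj.h hunks above are pure re-indentation, a short usage sketch is the quickest way to sanity-check the API they touch. It uses only calls visible in this diff (BSONObjBuilder::append/appendInt/doneAndDecouple, BSONObjIterator, the BSONElement accessors, BSONObj::findElement); the include choices and the wrapper function are illustrative assumptions, not part of the commit.

    // Sketch only: assumes jsobj.h and its usual predecessors in db/ are on the include path.
    #include "jsobj.h"

    void jsobjSketch() {
        BSONObjBuilder b;
        b.append("name", "joe");       // String element
        b.appendInt("age", 33);        // NumberInt element
        b.append("pi", 3.14159);       // NumberDouble element

        // done(): the builder keeps ownership of the buffer and frees it.
        // doneAndDecouple(): the returned BSONObj takes ownership instead.
        BSONObj o = b.doneAndDecouple();

        BSONObjIterator it(o);
        while ( it.more() ) {
            BSONElement e = it.next();
            if ( e.eoo() )             // every object is terminated by an EOO element
                break;
            cout << e.fieldName() << endl;
        }

        BSONElement age = o.findElement("age");
        if ( !age.eoo() && age.isNumber() )
            cout << "age = " << age.number() << endl;
    }
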
diff --git a/db/json.cpp b/db/json.cpp
index aa884f259e5..da1a8c54da0 100644
--- a/db/json.cpp
+++ b/db/json.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -22,48 +22,55 @@
/* partial implementation for now */
-void skipWhite(const char *&p) {
- while( *p == ' ' || *p == '\r' || *p == '\n' || *p == '\t' )
- p++;
+void skipWhite(const char *&p) {
+ while ( *p == ' ' || *p == '\r' || *p == '\n' || *p == '\t' )
+ p++;
}
-void value(BSONObjBuilder& b, const char *&p, string& id) {
- if( strncmp(p, "ObjId()", 7) == 0 ) {
- p += 7;
- b.appendOID(id.c_str());
- }
- else if( *p == '1' ) {
- b.append(id.c_str(), 1);
- p++;
- }
- else {
- assert(false);
- }
+void value(BSONObjBuilder& b, const char *&p, string& id) {
+ if ( strncmp(p, "ObjId()", 7) == 0 ) {
+ p += 7;
+ b.appendOID(id.c_str());
+ }
+ else if ( *p == '1' ) {
+ b.append(id.c_str(), 1);
+ p++;
+ }
+ else {
+ assert(false);
+ }
}
-void _fromjson(BSONObjBuilder& b, const char *&p) {
- while( 1 ) {
- skipWhite(p);
- if( *p == 0 )
- break;
- if( *p == '{' ) { _fromjson(b,++p); continue; }
- if( *p == '}' ) { ++p; break; }
- if( *p == '_' || isalpha(*p) ) {
- string id;
- while( *p == '_' || isalpha(*p) || isdigit(*p) ) {
- id += *p++;
- }
- skipWhite(p);
- assert( *p == ':' ); p++;
- skipWhite(p);
- value(b, p, id);
- continue;
- }
- }
+void _fromjson(BSONObjBuilder& b, const char *&p) {
+ while ( 1 ) {
+ skipWhite(p);
+ if ( *p == 0 )
+ break;
+ if ( *p == '{' ) {
+ _fromjson(b,++p);
+ continue;
+ }
+ if ( *p == '}' ) {
+ ++p;
+ break;
+ }
+ if ( *p == '_' || isalpha(*p) ) {
+ string id;
+ while ( *p == '_' || isalpha(*p) || isdigit(*p) ) {
+ id += *p++;
+ }
+ skipWhite(p);
+ assert( *p == ':' );
+ p++;
+ skipWhite(p);
+ value(b, p, id);
+ continue;
+ }
+ }
}
-BSONObj fromjson(const char *str) {
- BSONObjBuilder b;
- _fromjson(b,str);
- return b.doneAndDecouple();
+BSONObj fromjson(const char *str) {
+ BSONObjBuilder b;
+ _fromjson(b,str);
+ return b.doneAndDecouple();
}
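
As the "partial implementation for now" note above says, this parser is a stub: _fromjson() only consumes whitespace, braces, ':' and bare identifiers, and value() only accepts the token ObjId() or the literal 1 (anything else trips assert(false)); commas are not consumed at all. A minimal sketch of what it can handle today, assuming json.h declares fromjson() as defined here (the wrapper function is illustrative):

    #include "jsobj.h"
    #include "json.h"

    void jsonSketch() {
        // One field, no commas: '{', identifier, ':', ObjId() token, '}'.
        BSONObj o = fromjson("{ _id : ObjId() }");
        cout << o.toString() << endl;
    }
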
diff --git a/db/json.h b/db/json.h
index 25fdca03be7..604c8759d79 100644
--- a/db/json.h
+++ b/db/json.h
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/db/matcher.cpp b/db/matcher.cpp
index 0a29a760dc6..e95149208bf 100644
--- a/db/matcher.cpp
+++ b/db/matcher.cpp
@@ -4,16 +4,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -31,12 +31,12 @@ using namespace stdext;
typedef const char * MyStr;
struct less_str {
- bool operator()(const MyStr & x, const MyStr & y) const {
- if ( strcmp(x, y) > 0)
- return true;
+ bool operator()(const MyStr & x, const MyStr & y) const {
+ if ( strcmp(x, y) > 0)
+ return true;
- return false;
- }
+ return false;
+ }
};
typedef hash_map<const char*, int, hash_compare<const char *, less_str> > strhashmap;
@@ -48,12 +48,12 @@ using namespace __gnu_cxx;
typedef const char * MyStr;
struct eq_str {
- bool operator()(const MyStr & x, const MyStr & y) const {
- if ( strcmp(x, y) == 0)
- return true;
+ bool operator()(const MyStr & x, const MyStr & y) const {
+ if ( strcmp(x, y) == 0)
+ return true;
- return false;
- }
+ return false;
+ }
};
typedef hash_map<const char*, int, hash<const char *>, eq_str > strhashmap;
@@ -63,211 +63,214 @@ typedef hash_map<const char*, int, hash<const char *>, eq_str > strhashmap;
//#include "minilex.h"
//MiniLex minilex;
-class Where {
+class Where {
public:
- Where() { jsScope = 0; }
- ~Where() {
+ Where() {
+ jsScope = 0;
+ }
+ ~Where() {
#if !defined(NOJNI)
- JavaJS->scopeFree(scope);
+ JavaJS->scopeFree(scope);
#endif
- if ( jsScope )
- delete jsScope;
- scope = 0; func = 0;
- }
+ if ( jsScope )
+ delete jsScope;
+ scope = 0;
+ func = 0;
+ }
- jlong scope, func;
+ jlong scope, func;
BSONObj *jsScope;
-
- void setFunc(const char *code) {
+
+ void setFunc(const char *code) {
#if !defined(NOJNI)
- func = JavaJS->functionCreate( code );
+ func = JavaJS->functionCreate( code );
#endif
- }
+ }
};
-JSMatcher::~JSMatcher() {
- for( int i = 0; i < nBuilders; i++ )
- delete builders[i];
- delete in;
- delete where;
+JSMatcher::~JSMatcher() {
+ for ( int i = 0; i < nBuilders; i++ )
+ delete builders[i];
+ delete in;
+ delete where;
}
#include "pdfile.h"
/* _jsobj - the query pattern
- indexKeyPattern - the "key pattern" / template of what is in the keys of the index we are using.
+ indexKeyPattern - the "key pattern" / template of what is in the keys of the index we are using.
used to set indexMatches return value from matches()
*/
-JSMatcher::JSMatcher(BSONObj &_jsobj, BSONObj indexKeyPattern) :
- in(0), where(0), jsobj(_jsobj), nRegex(0)
+JSMatcher::JSMatcher(BSONObj &_jsobj, BSONObj indexKeyPattern) :
+ in(0), where(0), jsobj(_jsobj), nRegex(0)
{
checkInIndex = !indexKeyPattern.isEmpty();
- nBuilders = 0;
- BSONObjIterator i(jsobj);
- n = 0;
- while( i.more() ) {
- BSONElement e = i.next();
- if( e.eoo() )
- break;
-
- if( ( e.type() == CodeWScope || e.type() == Code ) && strcmp(e.fieldName(), "$where")==0 ) {
- // $where: function()...
- uassert( "$where occurs twice?", where == 0 );
- where = new Where();
- uassert( "$where query, but jni is disabled", JavaJS );
+ nBuilders = 0;
+ BSONObjIterator i(jsobj);
+ n = 0;
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+
+ if ( ( e.type() == CodeWScope || e.type() == Code ) && strcmp(e.fieldName(), "$where")==0 ) {
+ // $where: function()...
+ uassert( "$where occurs twice?", where == 0 );
+ where = new Where();
+ uassert( "$where query, but jni is disabled", JavaJS );
#if !defined(NOJNI)
- where->scope = JavaJS->scopeCreate();
- JavaJS->scopeSetString(where->scope, "$client", database->name.c_str());
-
- if ( e.type() == CodeWScope ){
- where->setFunc( e.codeWScopeCode() );
- where->jsScope = new BSONObj( e.codeWScopeScopeData() , 0 );
- }
- else {
- const char *code = e.valuestr();
- where->setFunc(code);
- }
+ where->scope = JavaJS->scopeCreate();
+ JavaJS->scopeSetString(where->scope, "$client", database->name.c_str());
+
+ if ( e.type() == CodeWScope ) {
+ where->setFunc( e.codeWScopeCode() );
+ where->jsScope = new BSONObj( e.codeWScopeScopeData() , 0 );
+ }
+ else {
+ const char *code = e.valuestr();
+ where->setFunc(code);
+ }
#endif
- continue;
- }
-
- if( e.type() == RegEx ) {
- if( nRegex >= 4 ) {
- cout << "ERROR: too many regexes in query" << endl;
- }
- else {
- pcrecpp::RE_Options options;
- options.set_utf8(true);
- const char *flags = e.regexFlags();
- while( flags && *flags ) {
- if( *flags == 'i' )
- options.set_caseless(true);
- else if( *flags == 'm' )
- options.set_multiline(true);
- else if( *flags == 'x' )
- options.set_extended(true);
- flags++;
- }
+ continue;
+ }
+
+ if ( e.type() == RegEx ) {
+ if ( nRegex >= 4 ) {
+ cout << "ERROR: too many regexes in query" << endl;
+ }
+ else {
+ pcrecpp::RE_Options options;
+ options.set_utf8(true);
+ const char *flags = e.regexFlags();
+ while ( flags && *flags ) {
+ if ( *flags == 'i' )
+ options.set_caseless(true);
+ else if ( *flags == 'm' )
+ options.set_multiline(true);
+ else if ( *flags == 'x' )
+ options.set_extended(true);
+ flags++;
+ }
RegexMatcher& rm = regexs[nRegex];
- rm.re = new pcrecpp::RE(e.regex(), options);
- rm.fieldName = e.fieldName();
+ rm.re = new pcrecpp::RE(e.regex(), options);
+ rm.fieldName = e.fieldName();
rm.inIndex = indexKeyPattern.hasElement(rm.fieldName);
- nRegex++;
- }
- continue;
- }
-
- // greater than / less than...
- // e.g., e == { a : { $gt : 3 } }
- // or
- // { a : { $in : [1,2,3] } }
- if( e.type() == Object ) {
- // e.g., fe == { $gt : 3 }
- BSONObjIterator j(e.embeddedObject());
- bool ok = false;
- while( j.more() ) {
- BSONElement fe = j.next();
- if( fe.eoo() )
- break;
- // BSONElement fe = e.embeddedObject().firstElement();
- const char *fn = fe.fieldName();
+ nRegex++;
+ }
+ continue;
+ }
+
+ // greater than / less than...
+ // e.g., e == { a : { $gt : 3 } }
+ // or
+ // { a : { $in : [1,2,3] } }
+ if ( e.type() == Object ) {
+ // e.g., fe == { $gt : 3 }
+ BSONObjIterator j(e.embeddedObject());
+ bool ok = false;
+ while ( j.more() ) {
+ BSONElement fe = j.next();
+ if ( fe.eoo() )
+ break;
+ // BSONElement fe = e.embeddedObject().firstElement();
+ const char *fn = fe.fieldName();
/* TODO: use getGtLtOp() here. this code repeats ourself */
- if( fn[0] == '$' && fn[1] ) {
- if( fn[2] == 't' ) {
- int op = Equality;
- if( fn[1] == 'g' ) {
- if( fn[3] == 0 ) op = GT;
- else if( fn[3] == 'e' && fn[4] == 0 ) op = GTE;
- else
- uassert("invalid $operator", false);
- }
- else if( fn[1] == 'l' ) {
- if( fn[3] == 0 ) op = LT;
- else if( fn[3] == 'e' && fn[4] == 0 ) op = LTE;
- else
- uassert("invalid $operator", false);
- }
- else
- uassert("invalid $operator", false);
- if( op ) {
- uassert("too many items to match in query", nBuilders < 8);
- BSONObjBuilder *b = new BSONObjBuilder();
- builders[nBuilders++] = b;
- b->appendAs(fe, e.fieldName());
+ if ( fn[0] == '$' && fn[1] ) {
+ if ( fn[2] == 't' ) {
+ int op = Equality;
+ if ( fn[1] == 'g' ) {
+ if ( fn[3] == 0 ) op = GT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) op = GTE;
+ else
+ uassert("invalid $operator", false);
+ }
+ else if ( fn[1] == 'l' ) {
+ if ( fn[3] == 0 ) op = LT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) op = LTE;
+ else
+ uassert("invalid $operator", false);
+ }
+ else
+ uassert("invalid $operator", false);
+ if ( op ) {
+ uassert("too many items to match in query", nBuilders < 8);
+ BSONObjBuilder *b = new BSONObjBuilder();
+ builders[nBuilders++] = b;
+ b->appendAs(fe, e.fieldName());
addBasic(b->done().firstElement(), op, indexKeyPattern);
- ok = true;
- }
- }
- else if( fn[2] == 'e' ) {
- if( fn[1] == 'n' && fn[3] == 0 ) {
+ ok = true;
+ }
+ }
+ else if ( fn[2] == 'e' ) {
+ if ( fn[1] == 'n' && fn[3] == 0 ) {
// $ne
uassert("too many items to match in query", nBuilders < 8);
- BSONObjBuilder *b = new BSONObjBuilder();
- builders[nBuilders++] = b;
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), NE, indexKeyPattern);
- ok = true;
+ BSONObjBuilder *b = new BSONObjBuilder();
+ builders[nBuilders++] = b;
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), NE, indexKeyPattern);
+ ok = true;
}
else
uassert("invalid $operator", false);
}
- else if( fn[1] == 'i' && fn[2] == 'n' && fn[3] == 0 && fe.type() == Array ) {
- // $in
+ else if ( fn[1] == 'i' && fn[2] == 'n' && fn[3] == 0 && fe.type() == Array ) {
+ // $in
uassert( "only 1 $in statement per query supported", in == 0 ); // todo...
- in = new set<BSONElement,element_lt>();
- BSONObjIterator i(fe.embeddedObject());
- if( i.more() ) {
- while( 1 ) {
+ in = new set<BSONElement,element_lt>();
+ BSONObjIterator i(fe.embeddedObject());
+ if ( i.more() ) {
+ while ( 1 ) {
BSONElement ie = i.next();
- if( ie.eoo() )
+ if ( ie.eoo() )
break;
in->insert(ie);
}
}
addBasic(e, opIN, indexKeyPattern); // e not actually used at the moment for $in
- ok = true;
- }
- else
- uassert("invalid $operator", false);
- }
- else {
- ok = false;
- break;
- }
- }
- if( ok )
- continue;
- }
+ ok = true;
+ }
+ else
+ uassert("invalid $operator", false);
+ }
+ else {
+ ok = false;
+ break;
+ }
+ }
+ if ( ok )
+ continue;
+ }
// normal, simple case e.g. { a : "foo" }
addBasic(e, Equality, indexKeyPattern);
- }
+ }
}
-inline int JSMatcher::valuesMatch(BSONElement& l, BSONElement& r, int op) {
- if( op == 0 )
- return l.valuesEqual(r);
+inline int JSMatcher::valuesMatch(BSONElement& l, BSONElement& r, int op) {
+ if ( op == 0 )
+ return l.valuesEqual(r);
- if( op == NE ) {
+ if ( op == NE ) {
return !l.valuesEqual(r);
}
- if( op == opIN ) {
- // { $in : [1,2,3] }
+ if ( op == opIN ) {
+ // { $in : [1,2,3] }
int c = in->count(l);
return c;
- }
-
- /* check LT, GTE, ... */
- if( l.type() != r.type() )
- return false;
- int c = compareElementValues(l, r);
- if( c < -1 ) c = -1;
- if( c > 1 ) c = 1;
- int z = 1 << (c+1);
- return (op & z);
+ }
+
+ /* check LT, GTE, ... */
+ if ( l.type() != r.type() )
+ return false;
+ int c = compareElementValues(l, r);
+ if ( c < -1 ) c = -1;
+ if ( c > 1 ) c = 1;
+ int z = 1 << (c+1);
+ return (op & z);
}
/* Check if a particular field matches.
@@ -277,7 +280,7 @@ inline int JSMatcher::valuesMatch(BSONElement& l, BSONElement& r, int op) {
obj - database object to check against
compareOp - Equality, LT, GT, etc.
deep - out param. set to true/false if we scanned an array
- isArr -
+ isArr -
Special forms:
@@ -290,161 +293,161 @@ inline int JSMatcher::valuesMatch(BSONElement& l, BSONElement& r, int op) {
0 missing element
1 match
*/
-int JSMatcher::matchesDotted(const char *fieldName, BSONElement& toMatch, BSONObj& obj, int compareOp, bool *deep, bool isArr) {
- {
- const char *p = strchr(fieldName, '.');
- if( p ) {
- string left(fieldName, p-fieldName);
-
- BSONElement e = obj.getField(left.c_str());
- if( e.eoo() )
- return 0;
- if( e.type() != Object && e.type() != Array )
- return -1;
-
- BSONObj eo = e.embeddedObject();
- return matchesDotted(p+1, toMatch, eo, compareOp, deep, e.type() == Array);
- }
- }
-
- BSONElement e = obj.getField(fieldName);
-
- if( valuesMatch(e, toMatch, compareOp) ) {
- return 1;
- }
- else if( e.type() == Array ) {
- BSONObjIterator ai(e.embeddedObject());
- while( ai.more() ) {
- BSONElement z = ai.next();
- if( valuesMatch( z, toMatch, compareOp) ) {
- if( deep )
- *deep = true;
- return 1;
- }
- }
- }
- else if( isArr ) {
- BSONObjIterator ai(obj);
- while( ai.more() ) {
- BSONElement z = ai.next();
- if( z.type() == Object ) {
- BSONObj eo = z.embeddedObject();
- int cmp = matchesDotted(fieldName, toMatch, eo, compareOp, deep);
- if( cmp > 0 ) {
- if( deep ) *deep = true;
- return 1;
- }
- }
- }
- }
- else if( e.eoo() ) {
+int JSMatcher::matchesDotted(const char *fieldName, BSONElement& toMatch, BSONObj& obj, int compareOp, bool *deep, bool isArr) {
+ {
+ const char *p = strchr(fieldName, '.');
+ if ( p ) {
+ string left(fieldName, p-fieldName);
+
+ BSONElement e = obj.getField(left.c_str());
+ if ( e.eoo() )
+ return 0;
+ if ( e.type() != Object && e.type() != Array )
+ return -1;
+
+ BSONObj eo = e.embeddedObject();
+ return matchesDotted(p+1, toMatch, eo, compareOp, deep, e.type() == Array);
+ }
+ }
+
+ BSONElement e = obj.getField(fieldName);
+
+ if ( valuesMatch(e, toMatch, compareOp) ) {
+ return 1;
+ }
+ else if ( e.type() == Array ) {
+ BSONObjIterator ai(e.embeddedObject());
+ while ( ai.more() ) {
+ BSONElement z = ai.next();
+ if ( valuesMatch( z, toMatch, compareOp) ) {
+ if ( deep )
+ *deep = true;
+ return 1;
+ }
+ }
+ }
+ else if ( isArr ) {
+ BSONObjIterator ai(obj);
+ while ( ai.more() ) {
+ BSONElement z = ai.next();
+ if ( z.type() == Object ) {
+ BSONObj eo = z.embeddedObject();
+ int cmp = matchesDotted(fieldName, toMatch, eo, compareOp, deep);
+ if ( cmp > 0 ) {
+ if ( deep ) *deep = true;
+ return 1;
+ }
+ }
+ }
+ }
+ else if ( e.eoo() ) {
        // 0 indicates "missing element"
return 0;
- }
- return -1;
+ }
+ return -1;
}
extern int dump;
-inline bool _regexMatches(RegexMatcher& rm, BSONElement& e) {
- char buf[64];
- const char *p = buf;
- if( e.type() == String || e.type() == Symbol )
- p = e.valuestr();
- else if( e.isNumber() ) {
- sprintf(buf, "%f", e.number());
- }
- else if( e.type() == Date ) {
- unsigned long long d = e.date();
- time_t t = (d/1000);
- time_t_to_String(t, buf);
- }
- else
- return false;
- return rm.re->PartialMatch(p);
+inline bool _regexMatches(RegexMatcher& rm, BSONElement& e) {
+ char buf[64];
+ const char *p = buf;
+ if ( e.type() == String || e.type() == Symbol )
+ p = e.valuestr();
+ else if ( e.isNumber() ) {
+ sprintf(buf, "%f", e.number());
+ }
+ else if ( e.type() == Date ) {
+ unsigned long long d = e.date();
+ time_t t = (d/1000);
+ time_t_to_String(t, buf);
+ }
+ else
+ return false;
+ return rm.re->PartialMatch(p);
}
/* todo: internal dotted notation scans -- not done yet here. */
-inline bool regexMatches(RegexMatcher& rm, BSONElement& e, bool *deep) {
- if( e.type() != Array )
- return _regexMatches(rm, e);
-
- BSONObjIterator ai(e.embeddedObject());
- while( ai.more() ) {
- BSONElement z = ai.next();
- if( _regexMatches(rm, z) ) {
- if( deep )
- *deep = true;
- return true;
- }
- }
- return false;
+inline bool regexMatches(RegexMatcher& rm, BSONElement& e, bool *deep) {
+ if ( e.type() != Array )
+ return _regexMatches(rm, e);
+
+ BSONObjIterator ai(e.embeddedObject());
+ while ( ai.more() ) {
+ BSONElement z = ai.next();
+ if ( _regexMatches(rm, z) ) {
+ if ( deep )
+ *deep = true;
+ return true;
+ }
+ }
+ return false;
}
/* See if an object matches the query.
- deep - return true when means we looked into arrays for a match
+   deep - set to true when we looked into arrays for a match
- Wondering if it would be worth having
+ Wondering if it would be worth having
if( !inIndex && !ok ) continue;
in each loop to bypass those checks. probably not worth checking as usually we are ok.
*/
bool JSMatcher::matches(BSONObj& jsobj, bool *deep) {
- if( deep )
- *deep = false;
+ if ( deep )
+ *deep = false;
- /* assuming there is usually only one thing to match. if more this
- could be slow sometimes. */
+ /* assuming there is usually only one thing to match. if more this
+ could be slow sometimes. */
- // check normal non-regex cases:
- for( int i = 0; i < n; i++ ) {
+ // check normal non-regex cases:
+ for ( int i = 0; i < n; i++ ) {
BasicMatcher& bm = basics[i];
- BSONElement& m = bm.toMatch;
- // -1=mismatch. 0=missing element. 1=match
- int cmp = matchesDotted(m.fieldName(), m, jsobj, bm.compareOp, deep);
+ BSONElement& m = bm.toMatch;
+ // -1=mismatch. 0=missing element. 1=match
+ int cmp = matchesDotted(m.fieldName(), m, jsobj, bm.compareOp, deep);
- if( cmp < 0 )
- return false;
- if( cmp == 0 ) {
+ if ( cmp < 0 )
+ return false;
+ if ( cmp == 0 ) {
/* missing is ok iff we were looking for null */
- if( m.type() == jstNULL || m.type() == Undefined ) {
- if( bm.compareOp == NE ) {
+ if ( m.type() == jstNULL || m.type() == Undefined ) {
+ if ( bm.compareOp == NE ) {
return false;
}
} else {
- return false;
+ return false;
}
}
- }
-
- for( int r = 0; r < nRegex; r++ ) {
- RegexMatcher& rm = regexs[r];
- BSONElement e = jsobj.getFieldDotted(rm.fieldName);
- if( e.eoo() )
- return false;
- if( !regexMatches(rm, e, deep) )
- return false;
- }
-
- if( where ) {
- if( where->func == 0 ) {
+ }
+
+ for ( int r = 0; r < nRegex; r++ ) {
+ RegexMatcher& rm = regexs[r];
+ BSONElement e = jsobj.getFieldDotted(rm.fieldName);
+ if ( e.eoo() )
+ return false;
+ if ( !regexMatches(rm, e, deep) )
+ return false;
+ }
+
+ if ( where ) {
+ if ( where->func == 0 ) {
uassert("$where compile error", false);
return false; // didn't compile
}
-#if !defined(NOJNI)
- /**if( 1 || jsobj.objsize() < 200 || where->fullObject ) */
+#if !defined(NOJNI)
+ /**if( 1 || jsobj.objsize() < 200 || where->fullObject ) */
{
- if ( where->jsScope ){
+ if ( where->jsScope ) {
JavaJS->scopeInit( where->scope , where->jsScope );
}
- JavaJS->scopeSetThis(where->scope, &jsobj);
- JavaJS->scopeSetObject(where->scope, "obj", &jsobj);
- }
+ JavaJS->scopeSetThis(where->scope, &jsobj);
+ JavaJS->scopeSetObject(where->scope, "obj", &jsobj);
+ }
/*else {
BSONObjBuilder b;
where->buildSubset(jsobj, b);
BSONObj temp = b.done();
JavaJS->scopeSetObject(where->scope, "obj", &temp);
}*/
- if( JavaJS->invoke(where->scope, where->func) ) {
+ if ( JavaJS->invoke(where->scope, where->func) ) {
uassert("error in invocation of $where function", false);
return false;
}
@@ -461,63 +464,66 @@ struct JSObj1 js1;
#pragma pack(push,1)
struct JSObj2 {
- JSObj2() {
- totsize=sizeof(JSObj2);
- s = String; strcpy_s(sname, 7, "abcdef"); slen = 10;
- strcpy_s(sval, 10, "123456789"); eoo = EOO;
- }
- unsigned totsize;
- char s;
- char sname[7];
- unsigned slen;
- char sval[10];
- char eoo;
+ JSObj2() {
+ totsize=sizeof(JSObj2);
+ s = String;
+ strcpy_s(sname, 7, "abcdef");
+ slen = 10;
+ strcpy_s(sval, 10, "123456789");
+ eoo = EOO;
+ }
+ unsigned totsize;
+ char s;
+ char sname[7];
+ unsigned slen;
+ char sval[10];
+ char eoo;
} js2;
struct JSUnitTest : public UnitTest {
- void run() {
-
- BSONObj j1((const char *) &js1);
- BSONObj j2((const char *) &js2);
- JSMatcher m(j2, BSONObj());
- assert( m.matches(j1) );
- js2.sval[0] = 'z';
- assert( !m.matches(j1) );
- JSMatcher n(j1, BSONObj());
- assert( n.matches(j1) );
- assert( !n.matches(j2) );
-
- BSONObj j0 = emptyObj;
+ void run() {
+
+ BSONObj j1((const char *) &js1);
+ BSONObj j2((const char *) &js2);
+ JSMatcher m(j2, BSONObj());
+ assert( m.matches(j1) );
+ js2.sval[0] = 'z';
+ assert( !m.matches(j1) );
+ JSMatcher n(j1, BSONObj());
+ assert( n.matches(j1) );
+ assert( !n.matches(j2) );
+
+ BSONObj j0 = emptyObj;
// BSONObj j0((const char *) &js0);
- JSMatcher p(j0, BSONObj());
- assert( p.matches(j1) );
- assert( p.matches(j2) );
- }
+ JSMatcher p(j0, BSONObj());
+ assert( p.matches(j1) );
+ assert( p.matches(j2) );
+ }
} jsunittest;
#pragma pack(pop)
-struct RXTest : public UnitTest {
+struct RXTest : public UnitTest {
- RXTest() {
+ RXTest() {
}
- void run() {
- /*
- static const boost::regex e("(\\d{4}[- ]){3}\\d{4}");
- static const boost::regex b(".....");
- cout << "regex result: " << regex_match("hello", e) << endl;
- cout << "regex result: " << regex_match("abcoo", b) << endl;
- */
- pcrecpp::RE re1(")({a}h.*o");
- pcrecpp::RE re("h.llo");
- assert( re.FullMatch("hello") );
- assert( !re1.FullMatch("hello") );
-
-
- pcrecpp::RE_Options options;
- options.set_utf8(true);
- pcrecpp::RE part("dwi", options);
- assert( part.PartialMatch("dwight") );
- }
+ void run() {
+ /*
+ static const boost::regex e("(\\d{4}[- ]){3}\\d{4}");
+ static const boost::regex b(".....");
+ cout << "regex result: " << regex_match("hello", e) << endl;
+ cout << "regex result: " << regex_match("abcoo", b) << endl;
+ */
+ pcrecpp::RE re1(")({a}h.*o");
+ pcrecpp::RE re("h.llo");
+ assert( re.FullMatch("hello") );
+ assert( !re1.FullMatch("hello") );
+
+
+ pcrecpp::RE_Options options;
+ options.set_utf8(true);
+ pcrecpp::RE part("dwi", options);
+ assert( part.PartialMatch("dwight") );
+ }
} rxtest;
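
JSUnitTest above already exercises JSMatcher end to end against the packed test structs; the sketch below shows the same flow with a query pattern built through BSONObjBuilder, using only the constructor and matches() signatures visible in this diff. The field names and the helper function are illustrative assumptions.

    #include "jsobj.h"
    #include "matcher.h"

    bool matcherSketch(BSONObj& record) {
        // Pattern { name: "joe", age: { $gt: 30.0 } } -- one equality clause, one $gt clause.
        BSONObjBuilder gt;
        gt.append("$gt", 30.0);

        BSONObjBuilder q;
        q.append("name", "joe");
        q.append("age", gt.done());
        BSONObj pattern = q.doneAndDecouple();

        JSMatcher m(pattern, BSONObj());   // empty index key pattern, as in JSUnitTest

        bool deep = false;                 // set if the match was found inside an array
        return m.matches(record, &deep);
    }
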
diff --git a/db/matcher.h b/db/matcher.h
index 79bcea7cd0c..18b7d440bb1 100644
--- a/db/matcher.h
+++ b/db/matcher.h
@@ -4,42 +4,47 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
-#include <pcrecpp.h>
+#include <pcrecpp.h>
-class RegexMatcher {
+class RegexMatcher {
public:
- const char *fieldName;
- pcrecpp::RE *re;
+ const char *fieldName;
+ pcrecpp::RE *re;
bool inIndex;
- RegexMatcher() { re = 0; inIndex = false; }
- ~RegexMatcher() { delete re; }
+ RegexMatcher() {
+ re = 0;
+ inIndex = false;
+ }
+ ~RegexMatcher() {
+ delete re;
+ }
};
-class BasicMatcher {
+class BasicMatcher {
public:
BSONElement toMatch;
- int compareOp;
+ int compareOp;
bool inIndex;
};
// SQL where clause equivalent
-class Where;
+class Where;
/* Match BSON objects against a query pattern.
@@ -49,53 +54,55 @@ class Where;
{ a : 3 } is the pattern object.
GT/LT:
- { a : { $gt : 3 } }
+ { a : { $gt : 3 } }
Not equal:
- { a : { $ne : 3 } }
+ { a : { $ne : 3 } }
TODO: we should rewrite the matcher to be more an AST style.
*/
-class JSMatcher : boost::noncopyable {
- int matchesDotted(
- const char *fieldName,
- BSONElement& toMatch, BSONObj& obj,
- int compareOp, bool *deep, bool isArr = false);
-
- struct element_lt
- {
- bool operator()(const BSONElement& l, const BSONElement& r) const
- {
- int x = (int) l.type() - (int) r.type();
- if( x < 0 ) return true;
- if( x > 0 ) return false;
- return compareElementValues(l,r) < 0;
- }
- };
+class JSMatcher : boost::noncopyable {
+ int matchesDotted(
+ const char *fieldName,
+ BSONElement& toMatch, BSONObj& obj,
+ int compareOp, bool *deep, bool isArr = false);
+
+ struct element_lt
+ {
+ bool operator()(const BSONElement& l, const BSONElement& r) const
+ {
+ int x = (int) l.type() - (int) r.type();
+ if ( x < 0 ) return true;
+ if ( x > 0 ) return false;
+ return compareElementValues(l,r) < 0;
+ }
+ };
public:
- enum {
- Equality = 0,
- LT = 0x1,
- LTE = 0x3,
- GTE = 0x6,
- GT = 0x4,
- opIN = 0x8, // { x : { $in : [1,2,3] } }
+ enum {
+ Equality = 0,
+ LT = 0x1,
+ LTE = 0x3,
+ GTE = 0x6,
+ GT = 0x4,
+ opIN = 0x8, // { x : { $in : [1,2,3] } }
NE = 0x9
- };
+ };
- static int opDirection(int op) {
- return op <= LTE ? -1 : 1;
- }
+ static int opDirection(int op) {
+ return op <= LTE ? -1 : 1;
+ }
- JSMatcher(BSONObj& pattern, BSONObj indexKeyPattern);
+ JSMatcher(BSONObj& pattern, BSONObj indexKeyPattern);
- ~JSMatcher();
+ ~JSMatcher();
- /* deep - means we looked into arrays for a match
+ /* deep - means we looked into arrays for a match
*/
- bool matches(BSONObj& j, bool *deep = 0);
+ bool matches(BSONObj& j, bool *deep = 0);
- int getN() { return n; }
+ int getN() {
+ return n;
+ }
private:
void addBasic(BSONElement e, int c, BSONObj& indexKeyPattern) {
@@ -107,21 +114,21 @@ private:
n++;
}
- int valuesMatch(BSONElement& l, BSONElement& r, int op);
+ int valuesMatch(BSONElement& l, BSONElement& r, int op);
- set<BSONElement,element_lt> *in; // set if query uses $in
- Where *where; // set if query uses $where
- BSONObj& jsobj; // the query pattern. e.g., { name: "joe" }
+ set<BSONElement,element_lt> *in; // set if query uses $in
+ Where *where; // set if query uses $where
+ BSONObj& jsobj; // the query pattern. e.g., { name: "joe" }
vector<BasicMatcher> basics;
- int n; // # of basicmatcher items
+ int n; // # of basicmatcher items
- RegexMatcher regexs[4];
- int nRegex;
+ RegexMatcher regexs[4];
+ int nRegex;
- // so we delete the mem when we're done:
- BSONObjBuilder *builders[8];
- int nBuilders;
+ // so we delete the mem when we're done:
+ BSONObjBuilder *builders[8];
+ int nBuilders;
bool checkInIndex;
};
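
The comparison constants in the enum above are bit masks rather than arbitrary codes: valuesMatch() in matcher.cpp clamps compareElementValues() to c in {-1, 0, +1} and then tests op & (1 << (c+1)), so bit 0 means "less", bit 1 "equal" and bit 2 "greater" (Equality, NE and opIN are special-cased before that test, and opDirection() just derives a scan direction from the mask). A small worked check of that encoding follows; the helper names are illustrative, not part of the tree.

    #include <cassert>

    // Same test as the tail of JSMatcher::valuesMatch(); c is the clamped comparison result.
    static bool opAccepts(int op, int c) {
        return ( op & (1 << (c + 1)) ) != 0;
    }

    static void opMaskSketch() {
        // LT = 0x1: only c == -1.    LTE = 0x3: c == -1 or 0.
        // GT = 0x4: only c == +1.    GTE = 0x6: c == 0 or +1.
        assert(  opAccepts(0x1, -1) && !opAccepts(0x1, 0) && !opAccepts(0x1, +1) );
        assert(  opAccepts(0x3, -1) &&  opAccepts(0x3, 0) && !opAccepts(0x3, +1) );
        assert( !opAccepts(0x4, -1) && !opAccepts(0x4, 0) &&  opAccepts(0x4, +1) );
        assert( !opAccepts(0x6, -1) &&  opAccepts(0x6, 0) &&  opAccepts(0x6, +1) );
    }
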
diff --git a/db/minilex.h b/db/minilex.h
index 637778441fc..4520d1e7ddf 100644
--- a/db/minilex.h
+++ b/db/minilex.h
@@ -3,109 +3,109 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
struct MiniLex {
- strhashmap reserved;
- bool ic[256]; // ic=Identifier Character
- bool starter[256];
+ strhashmap reserved;
+ bool ic[256]; // ic=Identifier Character
+ bool starter[256];
- // dm: very dumb about comments and escaped quotes -- but we are faster then at least,
- // albeit returning too much (which is ok for jsbobj current usage).
- void grabVariables(char *code /*modified and must stay in scope*/, strhashmap& vars) {
- char *p = code;
- char last = 0;
- while( *p ) {
- if( starter[*p] ) {
- char *q = p+1;
- while( *q && ic[*q] ) q++;
- const char *identifier = p;
- bool done = *q == 0;
- *q = 0;
- if( !reserved.count(identifier) ) {
- // we try to be smart about 'obj' but have to be careful as obj.obj
- // can happen; this is so that nFields is right for simplistic where cases
- // so we can stop scanning in jsobj when we find the field of interest.
- if( strcmp(identifier,"obj")==0 && p>code && p[-1] != '.' )
- ;
- else
- vars[identifier] = 1;
- }
- if( done )
- break;
- p = q + 1;
- continue;
- }
+ // dm: very dumb about comments and escaped quotes -- but we are faster then at least,
+    // albeit returning too much (which is ok for jsobj current usage).
+ void grabVariables(char *code /*modified and must stay in scope*/, strhashmap& vars) {
+ char *p = code;
+ char last = 0;
+ while ( *p ) {
+ if ( starter[*p] ) {
+ char *q = p+1;
+ while ( *q && ic[*q] ) q++;
+ const char *identifier = p;
+ bool done = *q == 0;
+ *q = 0;
+ if ( !reserved.count(identifier) ) {
+ // we try to be smart about 'obj' but have to be careful as obj.obj
+ // can happen; this is so that nFields is right for simplistic where cases
+ // so we can stop scanning in jsobj when we find the field of interest.
+ if ( strcmp(identifier,"obj")==0 && p>code && p[-1] != '.' )
+ ;
+ else
+ vars[identifier] = 1;
+ }
+ if ( done )
+ break;
+ p = q + 1;
+ continue;
+ }
- if( *p == '\'' ) {
- p++;
- while( *p && *p != '\'' ) p++;
- }
- else if( *p == '"' ) {
- p++;
- while( *p && *p != '"' ) p++;
- }
- p++;
- }
- }
+ if ( *p == '\'' ) {
+ p++;
+ while ( *p && *p != '\'' ) p++;
+ }
+ else if ( *p == '"' ) {
+ p++;
+ while ( *p && *p != '"' ) p++;
+ }
+ p++;
+ }
+ }
- MiniLex() {
- strhashmap atest;
- atest["foo"] = 3;
- assert( atest.count("bar") == 0 );
- assert( atest.count("foo") == 1 );
- assert( atest["foo"] == 3 );
+ MiniLex() {
+ strhashmap atest;
+ atest["foo"] = 3;
+ assert( atest.count("bar") == 0 );
+ assert( atest.count("foo") == 1 );
+ assert( atest["foo"] == 3 );
- for( int i = 0; i < 256; i++ ) {
- ic[i] = starter[i] = false;
- }
- for( int i = 'a'; i <= 'z'; i++ )
- ic[i] = starter[i] = true;
- for( int i = 'A'; i <= 'Z'; i++ )
- ic[i] = starter[i] = true;
- for( int i = '0'; i <= '9'; i++ )
- ic[i] = true;
- for( int i = 128; i < 256; i++ )
- ic[i] = starter[i] = true;
- ic['$'] = starter['$'] = true;
- ic['_'] = starter['_'] = true;
+ for ( int i = 0; i < 256; i++ ) {
+ ic[i] = starter[i] = false;
+ }
+ for ( int i = 'a'; i <= 'z'; i++ )
+ ic[i] = starter[i] = true;
+ for ( int i = 'A'; i <= 'Z'; i++ )
+ ic[i] = starter[i] = true;
+ for ( int i = '0'; i <= '9'; i++ )
+ ic[i] = true;
+ for ( int i = 128; i < 256; i++ )
+ ic[i] = starter[i] = true;
+ ic['$'] = starter['$'] = true;
+ ic['_'] = starter['_'] = true;
- reserved["break"] = true;
- reserved["case"] = true;
- reserved["catch"] = true;
- reserved["continue"] = true;
- reserved["default"] = true;
- reserved["delete"] = true;
- reserved["do"] = true;
- reserved["else"] = true;
- reserved["finally"] = true;
- reserved["for"] = true;
- reserved["function"] = true;
- reserved["if"] = true;
- reserved["in"] = true;
- reserved["instanceof"] = true;
- reserved["new"] = true;
- reserved["return"] = true;
- reserved["switch"] = true;
- reserved["this"] = true;
- reserved["throw"] = true;
- reserved["try"] = true;
- reserved["typeof"] = true;
- reserved["var"] = true;
- reserved["void"] = true;
- reserved["while"] = true;
- reserved["with "] = true;
- }
+ reserved["break"] = true;
+ reserved["case"] = true;
+ reserved["catch"] = true;
+ reserved["continue"] = true;
+ reserved["default"] = true;
+ reserved["delete"] = true;
+ reserved["do"] = true;
+ reserved["else"] = true;
+ reserved["finally"] = true;
+ reserved["for"] = true;
+ reserved["function"] = true;
+ reserved["if"] = true;
+ reserved["in"] = true;
+ reserved["instanceof"] = true;
+ reserved["new"] = true;
+ reserved["return"] = true;
+ reserved["switch"] = true;
+ reserved["this"] = true;
+ reserved["throw"] = true;
+ reserved["try"] = true;
+ reserved["typeof"] = true;
+ reserved["var"] = true;
+ reserved["void"] = true;
+ reserved["while"] = true;
+ reserved["with "] = true;
+ }
};
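For readers skimming the reformatted hunk above, here is a minimal standalone sketch of the same table-driven scan -- plain std::set/std::string instead of strhashmap, only one reserved word, and no special-casing of 'obj'. The names (initTables, grabVars) are made up for the sketch.

#include <iostream>
#include <set>
#include <string>

// Character-class tables, built the same way MiniLex builds ic[]/starter[].
static bool ic[256];       // identifier characters
static bool starter[256];  // characters that may start an identifier

static void initTables() {
    for ( int i = 0; i < 256; i++ ) ic[i] = starter[i] = false;
    for ( int i = 'a'; i <= 'z'; i++ ) ic[i] = starter[i] = true;
    for ( int i = 'A'; i <= 'Z'; i++ ) ic[i] = starter[i] = true;
    for ( int i = '0'; i <= '9'; i++ ) ic[i] = true;
    ic['$'] = starter['$'] = true;
    ic['_'] = starter['_'] = true;
}

// Collect identifiers, skipping quoted strings and reserved words.
static void grabVars(const std::string& code, const std::set<std::string>& reserved,
                     std::set<std::string>& vars) {
    std::string::size_type i = 0;
    while ( i < code.size() ) {
        unsigned char c = code[i];
        if ( starter[c] ) {
            std::string::size_type j = i + 1;
            while ( j < code.size() && ic[(unsigned char) code[j]] ) j++;
            std::string ident = code.substr(i, j - i);
            if ( !reserved.count(ident) )
                vars.insert(ident);
            i = j;
        }
        else if ( c == '\'' || c == '"' ) {   // skip string literals, naively
            char quote = c;
            i++;
            while ( i < code.size() && code[i] != quote ) i++;
            i++;
        }
        else
            i++;
    }
}

int main() {
    initTables();
    std::set<std::string> reserved;
    reserved.insert("return");
    std::set<std::string> vars;
    grabVars("return obj.price > limit && tag != 'x';", reserved, vars);
    for ( std::set<std::string>::iterator it = vars.begin(); it != vars.end(); ++it )
        std::cout << *it << '\n';   // limit, obj, price, tag
}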
diff --git a/db/namespace.cpp b/db/namespace.cpp
index 28562db266d..fb8f42547b3 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -26,37 +26,37 @@
#include <list>
#include "query.h"
-/* deleted lists -- linked lists of deleted records -- tehy are placed in 'buckets' of various sizes
+/* deleted lists -- linked lists of deleted records -- they are placed in 'buckets' of various sizes
so you can look for a deleterecord about the right size.
*/
-int bucketSizes[] = {
- 32, 64, 128, 256, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000,
- 0x8000, 0x10000, 0x20000, 0x40000, 0x80000, 0x100000, 0x200000,
- 0x400000, 0x800000
+int bucketSizes[] = {
+ 32, 64, 128, 256, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000,
+ 0x8000, 0x10000, 0x20000, 0x40000, 0x80000, 0x100000, 0x200000,
+ 0x400000, 0x800000
};
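These size classes feed the bucket() helper that appears further down in namespace.h: a record goes into the first bucket whose size class is strictly larger than its length. A standalone copy of that rule with a few worked values:

#include <iostream>

// Standalone copy of the size classes above and the bucket() rule from namespace.h.
int bucketSizesCopy[] = {
    32, 64, 128, 256, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000,
    0x8000, 0x10000, 0x20000, 0x40000, 0x80000, 0x100000, 0x200000,
    0x400000, 0x800000
};
const int NBuckets = 19;

int bucketFor(int n) {                   // same logic as NamespaceDetails::bucket()
    for ( int i = 0; i < NBuckets; i++ )
        if ( bucketSizesCopy[i] > n )
            return i;
    return NBuckets - 1;
}

int main() {
    std::cout << bucketFor(100) << '\n';       // 2  -> the 128-byte class
    std::cout << bucketFor(128) << '\n';       // 3  -> the 256-byte class (strict >)
    std::cout << bucketFor(10000000) << '\n';  // 18 -> everything huge shares the last bucket
}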
//NamespaceIndexMgr namespaceIndexMgr;
- /* returns true if we created (did not exist) during init() */
-bool NamespaceIndex::init(const char *dir, const char *database) {
- boost::filesystem::path path( dir );
+/* returns true if we created (did not exist) during init() */
+bool NamespaceIndex::init(const char *dir, const char *database) {
+ boost::filesystem::path path( dir );
path /= string( database ) + ".ns";
- bool created = !boost::filesystem::exists(path);
-
- /* if someone manually deleted the datafiles for a database,
- we need to be sure to clear any cached info for the database in
- local.*.
+ bool created = !boost::filesystem::exists(path);
+
+ /* if someone manually deleted the datafiles for a database,
+ we need to be sure to clear any cached info for the database in
+ local.*.
*/
- if( string("local") != database ) {
+ if ( string("local") != database ) {
DBInfo i(database);
i.dbDropped();
}
const int LEN = 16 * 1024 * 1024;
- string pathString = path.string();
+ string pathString = path.string();
void *p = f.map(pathString.c_str(), LEN);
- if( p == 0 ) {
+ if ( p == 0 ) {
problem() << "couldn't open namespace.idx " << pathString << " terminating" << endl;
exit(-3);
}
@@ -64,265 +64,268 @@ bool NamespaceIndex::init(const char *dir, const char *database) {
return created;
}
-void NamespaceDetails::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
- {
- // defensive code: try to make us notice if we reference a deleted record
- (unsigned&) (((Record *) d)->data) = 0xeeeeeeee;
- }
-
- dassert( dloc.drec() == d );
- DEBUGGING cout << "TEMP: add deleted rec " << dloc.toString() << ' ' << hex << d->extentOfs << endl;
- int b = bucket(d->lengthWithHeaders);
- DiskLoc& list = deletedList[b];
- DiskLoc oldHead = list;
- list = dloc;
- d->nextDeleted = oldHead;
+void NamespaceDetails::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
+ {
+ // defensive code: try to make us notice if we reference a deleted record
+ (unsigned&) (((Record *) d)->data) = 0xeeeeeeee;
+ }
+
+ dassert( dloc.drec() == d );
+ DEBUGGING cout << "TEMP: add deleted rec " << dloc.toString() << ' ' << hex << d->extentOfs << endl;
+ int b = bucket(d->lengthWithHeaders);
+ DiskLoc& list = deletedList[b];
+ DiskLoc oldHead = list;
+ list = dloc;
+ d->nextDeleted = oldHead;
}
-/*
- lenToAlloc is WITH header
+/*
+ lenToAlloc is WITH header
*/
DiskLoc NamespaceDetails::alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc) {
- lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;
- DiskLoc loc = _alloc(ns, lenToAlloc);
- if( loc.isNull() )
- return loc;
-
- DeletedRecord *r = loc.drec();
-
- /* note we want to grab from the front so our next pointers on disk tend
- to go in a forward direction which is important for performance. */
- int regionlen = r->lengthWithHeaders;
- extentLoc.set(loc.a(), r->extentOfs);
- assert( r->extentOfs < loc.getOfs() );
-
- DEBUGGING cout << "TEMP: alloc() returns " << loc.toString() << ' ' << ns << " lentoalloc:" << lenToAlloc << " ext:" << extentLoc.toString() << endl;
-
- int left = regionlen - lenToAlloc;
- if( left < 24 || (left < (lenToAlloc >> 3) && capped == 0) ) {
- // you get the whole thing.
- return loc;
- }
-
- /* split off some for further use. */
- r->lengthWithHeaders = lenToAlloc;
- DiskLoc newDelLoc = loc;
- newDelLoc.inc(lenToAlloc);
- DeletedRecord *newDel = newDelLoc.drec();
- newDel->extentOfs = r->extentOfs;
- newDel->lengthWithHeaders = left;
- newDel->nextDeleted.Null();
-
- addDeletedRec(newDel, newDelLoc);
-
- return loc;
+ lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;
+ DiskLoc loc = _alloc(ns, lenToAlloc);
+ if ( loc.isNull() )
+ return loc;
+
+ DeletedRecord *r = loc.drec();
+
+ /* note we want to grab from the front so our next pointers on disk tend
+ to go in a forward direction which is important for performance. */
+ int regionlen = r->lengthWithHeaders;
+ extentLoc.set(loc.a(), r->extentOfs);
+ assert( r->extentOfs < loc.getOfs() );
+
+ DEBUGGING cout << "TEMP: alloc() returns " << loc.toString() << ' ' << ns << " lentoalloc:" << lenToAlloc << " ext:" << extentLoc.toString() << endl;
+
+ int left = regionlen - lenToAlloc;
+ if ( left < 24 || (left < (lenToAlloc >> 3) && capped == 0) ) {
+ // you get the whole thing.
+ return loc;
+ }
+
+ /* split off some for further use. */
+ r->lengthWithHeaders = lenToAlloc;
+ DiskLoc newDelLoc = loc;
+ newDelLoc.inc(lenToAlloc);
+ DeletedRecord *newDel = newDelLoc.drec();
+ newDel->extentOfs = r->extentOfs;
+ newDel->lengthWithHeaders = left;
+ newDel->nextDeleted.Null();
+
+ addDeletedRec(newDel, newDelLoc);
+
+ return loc;
}
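The split rule in alloc() is easy to miss in the reformatting noise. A small sketch of just that arithmetic, with made-up names and plain ints standing in for DiskLoc/DeletedRecord:

#include <iostream>

// Mirrors the arithmetic in NamespaceDetails::alloc() above: round the request
// up to a 4-byte boundary, then hand back the whole deleted region unless the
// leftover is worth keeping -- at least 24 bytes and, for non-capped
// collections, at least 1/8 of the request.
void decide(int regionLen, int lenToAlloc, bool capped, int& usedLen, int& leftover) {
    lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;   // 4-byte alignment
    int left = regionLen - lenToAlloc;
    if ( left < 24 || (left < (lenToAlloc >> 3) && !capped) ) {
        usedLen = regionLen;   // caller gets the whole thing, sliver and all
        leftover = 0;
    }
    else {
        usedLen = lenToAlloc;  // split: the remainder goes back on a deleted list
        leftover = left;
    }
}

int main() {
    int used, left;
    decide(1000, 970, false, used, left);
    std::cout << used << ' ' << left << '\n';   // 1000 0 (leftover of 28 not worth keeping)
    decide(1000, 500, false, used, left);
    std::cout << used << ' ' << left << '\n';   // 500 500 (leftover goes back on a deleted list)
}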
/* for non-capped collections.
- returned item is out of the deleted list upon return
+   the returned item has already been removed from the deleted list when this returns
*/
DiskLoc NamespaceDetails::__stdAlloc(int len) {
- DiskLoc *prev;
- DiskLoc *bestprev = 0;
- DiskLoc bestmatch;
- int bestmatchlen = 0x7fffffff;
- int b = bucket(len);
- DiskLoc cur = deletedList[b]; prev = &deletedList[b];
- int extra = 5; // look for a better fit, a little.
- int chain = 0;
- while( 1 ) {
- {
- int a = cur.a();
- if( a < -1 || a >= 100000 ) {
- problem() << "~~ Assertion - cur out of range in _alloc() " << cur.toString() <<
- " b:" << b << " chain:" << chain << '\n';
- sayDbContext();
- if( cur == *prev )
- prev->Null();
- cur.Null();
- }
- }
- if( cur.isNull() ) {
- // move to next bucket. if we were doing "extra", just break
- if( bestmatchlen < 0x7fffffff )
- break;
- b++;
- if( b > MaxBucket ) {
- // out of space. alloc a new extent.
- return DiskLoc();
- }
- cur = deletedList[b]; prev = &deletedList[b];
- continue;
- }
- DeletedRecord *r = cur.drec();
- if( r->lengthWithHeaders >= len &&
- r->lengthWithHeaders < bestmatchlen ) {
- bestmatchlen = r->lengthWithHeaders;
- bestmatch = cur;
- bestprev = prev;
- }
- if( bestmatchlen < 0x7fffffff && --extra <= 0 )
- break;
- if( ++chain > 30 && b < MaxBucket ) {
- // too slow, force move to next bucket to grab a big chunk
- //b++;
- chain = 0;
- cur.Null();
- }
- else {
- if( r->nextDeleted.getOfs() == 0 ) {
- problem() << "~~ Assertion - bad nextDeleted " << r->nextDeleted.toString() <<
- " b:" << b << " chain:" << chain << ", fixing.\n";
- r->nextDeleted.Null();
- }
- cur = r->nextDeleted; prev = &r->nextDeleted;
- }
- }
-
- /* unlink ourself from the deleted list */
- {
- DeletedRecord *bmr = bestmatch.drec();
- *bestprev = bmr->nextDeleted;
- bmr->nextDeleted.setInvalid(); // defensive.
- assert(bmr->extentOfs < bestmatch.getOfs());
- }
-
- return bestmatch;
+ DiskLoc *prev;
+ DiskLoc *bestprev = 0;
+ DiskLoc bestmatch;
+ int bestmatchlen = 0x7fffffff;
+ int b = bucket(len);
+ DiskLoc cur = deletedList[b];
+ prev = &deletedList[b];
+ int extra = 5; // look for a better fit, a little.
+ int chain = 0;
+ while ( 1 ) {
+ {
+ int a = cur.a();
+ if ( a < -1 || a >= 100000 ) {
+ problem() << "~~ Assertion - cur out of range in _alloc() " << cur.toString() <<
+ " b:" << b << " chain:" << chain << '\n';
+ sayDbContext();
+ if ( cur == *prev )
+ prev->Null();
+ cur.Null();
+ }
+ }
+ if ( cur.isNull() ) {
+ // move to next bucket. if we were doing "extra", just break
+ if ( bestmatchlen < 0x7fffffff )
+ break;
+ b++;
+ if ( b > MaxBucket ) {
+ // out of space. alloc a new extent.
+ return DiskLoc();
+ }
+ cur = deletedList[b];
+ prev = &deletedList[b];
+ continue;
+ }
+ DeletedRecord *r = cur.drec();
+ if ( r->lengthWithHeaders >= len &&
+ r->lengthWithHeaders < bestmatchlen ) {
+ bestmatchlen = r->lengthWithHeaders;
+ bestmatch = cur;
+ bestprev = prev;
+ }
+ if ( bestmatchlen < 0x7fffffff && --extra <= 0 )
+ break;
+ if ( ++chain > 30 && b < MaxBucket ) {
+ // too slow, force move to next bucket to grab a big chunk
+ //b++;
+ chain = 0;
+ cur.Null();
+ }
+ else {
+ if ( r->nextDeleted.getOfs() == 0 ) {
+ problem() << "~~ Assertion - bad nextDeleted " << r->nextDeleted.toString() <<
+ " b:" << b << " chain:" << chain << ", fixing.\n";
+ r->nextDeleted.Null();
+ }
+ cur = r->nextDeleted;
+ prev = &r->nextDeleted;
+ }
+ }
+
+ /* unlink ourself from the deleted list */
+ {
+ DeletedRecord *bmr = bestmatch.drec();
+ *bestprev = bmr->nextDeleted;
+ bmr->nextDeleted.setInvalid(); // defensive.
+ assert(bmr->extentOfs < bestmatch.getOfs());
+ }
+
+ return bestmatch;
}
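The search policy in __stdAlloc() -- take the first region that fits, but keep scanning a handful of further entries for a tighter fit -- can be shown over a plain in-memory list. bestFit() is a made-up name and the vector stands in for the on-disk DiskLoc chains:

#include <climits>
#include <iostream>
#include <vector>

// Best fit with bounded look-ahead, as in __stdAlloc() above: once something
// fits, only look a few entries further ("extra") before settling.
int bestFit(const std::vector<int>& freeLens, int len) {
    int best = -1;
    int bestLen = INT_MAX;
    int extra = 5;                        // look a little past the first fit
    for ( size_t i = 0; i < freeLens.size(); i++ ) {
        if ( freeLens[i] >= len && freeLens[i] < bestLen ) {
            bestLen = freeLens[i];
            best = (int) i;
        }
        if ( best != -1 && --extra <= 0 )
            break;
    }
    return best;                          // index of chosen region, -1 if nothing fits
}

int main() {
    std::vector<int> freeLens;
    freeLens.push_back(4096);
    freeLens.push_back(700);
    freeLens.push_back(640);
    freeLens.push_back(520);
    freeLens.push_back(512);
    freeLens.push_back(900);
    std::cout << bestFit(freeLens, 500) << '\n';  // 4: the 512-byte region wins
}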
-void NamespaceDetails::dumpDeleted(set<DiskLoc> *extents) {
+void NamespaceDetails::dumpDeleted(set<DiskLoc> *extents) {
// cout << "DUMP deleted chains" << endl;
- for( int i = 0; i < Buckets; i++ ) {
+ for ( int i = 0; i < Buckets; i++ ) {
// cout << " bucket " << i << endl;
- DiskLoc dl = deletedList[i];
- while( !dl.isNull() ) {
- DeletedRecord *r = dl.drec();
- DiskLoc extLoc(dl.a(), r->extentOfs);
- if( extents == 0 || extents->count(extLoc) <= 0 ) {
- cout << " bucket " << i << endl;
- cout << " " << dl.toString() << " ext:" << extLoc.toString();
- if( extents && extents->count(extLoc) <= 0 )
- cout << '?';
- cout << " len:" << r->lengthWithHeaders << endl;
- }
- dl = r->nextDeleted;
- }
- }
+ DiskLoc dl = deletedList[i];
+ while ( !dl.isNull() ) {
+ DeletedRecord *r = dl.drec();
+ DiskLoc extLoc(dl.a(), r->extentOfs);
+ if ( extents == 0 || extents->count(extLoc) <= 0 ) {
+ cout << " bucket " << i << endl;
+ cout << " " << dl.toString() << " ext:" << extLoc.toString();
+ if ( extents && extents->count(extLoc) <= 0 )
+ cout << '?';
+ cout << " len:" << r->lengthWithHeaders << endl;
+ }
+ dl = r->nextDeleted;
+ }
+ }
// cout << endl;
}
/* combine adjacent deleted records
- this is O(n^2) but we call it for capped tables where typically n==1 or 2!
+ this is O(n^2) but we call it for capped tables where typically n==1 or 2!
(or 3...there will be a little unused sliver at the end of the extent.)
*/
-void NamespaceDetails::compact() {
- assert(capped);
- list<DiskLoc> drecs;
-
- for( int i = 0; i < Buckets; i++ ) {
- DiskLoc dl = deletedList[i];
- deletedList[i].Null();
- while( !dl.isNull() ) {
- DeletedRecord *r = dl.drec();
- drecs.push_back(dl);
- dl = r->nextDeleted;
- }
- }
-
- drecs.sort();
-
- list<DiskLoc>::iterator j = drecs.begin();
- assert( j != drecs.end() );
- DiskLoc a = *j;
- while( 1 ) {
- j++;
- if( j == drecs.end() ) {
- DEBUGGING cout << "TEMP: compact adddelrec\n";
- addDeletedRec(a.drec(), a);
- break;
- }
- DiskLoc b = *j;
- while( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders == b.getOfs() ) {
- // a & b are adjacent. merge.
- a.drec()->lengthWithHeaders += b.drec()->lengthWithHeaders;
- j++;
- if( j == drecs.end() ) {
- DEBUGGING cout << "temp: compact adddelrec2\n";
- addDeletedRec(a.drec(), a);
- return;
- }
- b = *j;
- }
- DEBUGGING cout << "temp: compact adddelrec3\n";
- addDeletedRec(a.drec(), a);
- a = b;
- }
+void NamespaceDetails::compact() {
+ assert(capped);
+ list<DiskLoc> drecs;
+
+ for ( int i = 0; i < Buckets; i++ ) {
+ DiskLoc dl = deletedList[i];
+ deletedList[i].Null();
+ while ( !dl.isNull() ) {
+ DeletedRecord *r = dl.drec();
+ drecs.push_back(dl);
+ dl = r->nextDeleted;
+ }
+ }
+
+ drecs.sort();
+
+ list<DiskLoc>::iterator j = drecs.begin();
+ assert( j != drecs.end() );
+ DiskLoc a = *j;
+ while ( 1 ) {
+ j++;
+ if ( j == drecs.end() ) {
+ DEBUGGING cout << "TEMP: compact adddelrec\n";
+ addDeletedRec(a.drec(), a);
+ break;
+ }
+ DiskLoc b = *j;
+ while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders == b.getOfs() ) {
+ // a & b are adjacent. merge.
+ a.drec()->lengthWithHeaders += b.drec()->lengthWithHeaders;
+ j++;
+ if ( j == drecs.end() ) {
+ DEBUGGING cout << "temp: compact adddelrec2\n";
+ addDeletedRec(a.drec(), a);
+ return;
+ }
+ b = *j;
+ }
+ DEBUGGING cout << "temp: compact adddelrec3\n";
+ addDeletedRec(a.drec(), a);
+ a = b;
+ }
}
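compact() boils down to sorting the free regions by position and folding together any that touch end-to-start. A sketch of just that merge step over (offset, length) pairs, with made-up names and without the re-insertion into the deleted buckets that compact() also does:

#include <algorithm>
#include <iostream>
#include <vector>

// Coalesce adjacent free regions, the core of compact() above.
struct Region {
    int ofs, len;
    bool operator<(const Region& r) const { return ofs < r.ofs; }
};

std::vector<Region> coalesce(std::vector<Region> regions) {
    std::sort(regions.begin(), regions.end());
    std::vector<Region> out;
    for ( size_t i = 0; i < regions.size(); i++ ) {
        if ( !out.empty() && out.back().ofs + out.back().len == regions[i].ofs )
            out.back().len += regions[i].len;   // adjacent: grow the previous region
        else
            out.push_back(regions[i]);          // gap: start a new region
    }
    return out;
}

int main() {
    std::vector<Region> v;
    Region a = { 400, 64 };  v.push_back(a);
    Region b = { 0, 100 };   v.push_back(b);
    Region c = { 100, 200 }; v.push_back(c);
    std::vector<Region> merged = coalesce(v);
    for ( size_t i = 0; i < merged.size(); i++ )
        std::cout << merged[i].ofs << "+" << merged[i].len << '\n';  // 0+300 then 400+64
}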
/* alloc with capped table handling. */
int n_complaints_cap = 0;
DiskLoc NamespaceDetails::_alloc(const char *ns, int len) {
- if( !capped )
- return __stdAlloc(len);
-
- // capped.
-
- assert( len < 400000000 );
- int passes = 0;
- DiskLoc loc;
-
- // delete records until we have room and the max # objects limit achieved.
- Extent *theExtent = firstExtent.ext(); // only one extent if capped.
- dassert( theExtent->ns == ns );
- theExtent->assertOk();
- while( 1 ) {
- if( nrecords < max ) {
- loc = __stdAlloc(len);
- if( !loc.isNull() )
- break;
- }
-
- DiskLoc fr = theExtent->firstRecord;
- if( fr.isNull() ) {
- if( ++n_complaints_cap < 8 ) {
- cout << "couldn't make room for new record in capped ns " << ns << '\n'
- << " len: " << len << " extentsize:" << lastExtentSize << '\n';
- cout << " magic: " << hex << theExtent->magic << " extent->ns: " << theExtent->ns.buf << '\n';
- cout << " fr: " << theExtent->firstRecord.toString() <<
- " lr: " << theExtent->lastRecord.toString() << " extent->len: " << theExtent->length << '\n';
- assert( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
- }
- return DiskLoc();
- }
-
- theDataFileMgr.deleteRecord(ns, fr.rec(), fr, true);
- compact();
- assert( ++passes < 5000 );
- }
-
- return loc;
+ if ( !capped )
+ return __stdAlloc(len);
+
+ // capped.
+
+ assert( len < 400000000 );
+ int passes = 0;
+ DiskLoc loc;
+
+    // delete records until we have room and the max # objects limit is satisfied.
+ Extent *theExtent = firstExtent.ext(); // only one extent if capped.
+ dassert( theExtent->ns == ns );
+ theExtent->assertOk();
+ while ( 1 ) {
+ if ( nrecords < max ) {
+ loc = __stdAlloc(len);
+ if ( !loc.isNull() )
+ break;
+ }
+
+ DiskLoc fr = theExtent->firstRecord;
+ if ( fr.isNull() ) {
+ if ( ++n_complaints_cap < 8 ) {
+ cout << "couldn't make room for new record in capped ns " << ns << '\n'
+ << " len: " << len << " extentsize:" << lastExtentSize << '\n';
+ cout << " magic: " << hex << theExtent->magic << " extent->ns: " << theExtent->ns.buf << '\n';
+ cout << " fr: " << theExtent->firstRecord.toString() <<
+ " lr: " << theExtent->lastRecord.toString() << " extent->len: " << theExtent->length << '\n';
+ assert( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
+ }
+ return DiskLoc();
+ }
+
+ theDataFileMgr.deleteRecord(ns, fr.rec(), fr, true);
+ compact();
+ assert( ++passes < 5000 );
+ }
+
+ return loc;
}
/* you MUST call when adding an index. see pdfile.cpp */
-void NamespaceDetails::addingIndex(const char *thisns, IndexDetails& details) {
- assert( nsdetails(thisns) == this );
- assert( &details == &indexes[nIndexes] );
- nIndexes++;
- NamespaceDetailsTransient::get(thisns).addedIndex();
+void NamespaceDetails::addingIndex(const char *thisns, IndexDetails& details) {
+ assert( nsdetails(thisns) == this );
+ assert( &details == &indexes[nIndexes] );
+ nIndexes++;
+ NamespaceDetailsTransient::get(thisns).addedIndex();
}
-/* returns index of the first index in which the field is present. -1 if not present.
+/* returns index of the first index in which the field is present. -1 if not present.
(aug08 - this method not currently used)
*/
int NamespaceDetails::fieldIsIndexed(const char *fieldName) {
- for( int i = 0; i < nIndexes; i++ ) {
- IndexDetails& idx = indexes[i];
- BSONObj idxKey = idx.info.obj().getObjectField("key"); // e.g., { ts : -1 }
- if( !idxKey.findElement(fieldName).eoo() )
- return i;
- }
- return -1;
+ for ( int i = 0; i < nIndexes; i++ ) {
+ IndexDetails& idx = indexes[i];
+ BSONObj idxKey = idx.info.obj().getObjectField("key"); // e.g., { ts : -1 }
+ if ( !idxKey.findElement(fieldName).eoo() )
+ return i;
+ }
+ return -1;
}
/* ------------------------------------------------------------------------- */
@@ -330,16 +333,16 @@ int NamespaceDetails::fieldIsIndexed(const char *fieldName) {
map<const char *,NamespaceDetailsTransient*> NamespaceDetailsTransient::map;
typedef map<const char *,NamespaceDetailsTransient*>::iterator ouriter;
-NamespaceDetailsTransient& NamespaceDetailsTransient::get(const char *ns) {
- NamespaceDetailsTransient*& t = map[ns];
- if( t == 0 )
- t = new NamespaceDetailsTransient(ns);
- return *t;
+NamespaceDetailsTransient& NamespaceDetailsTransient::get(const char *ns) {
+ NamespaceDetailsTransient*& t = map[ns];
+ if ( t == 0 )
+ t = new NamespaceDetailsTransient(ns);
+ return *t;
}
void NamespaceDetailsTransient::computeIndexKeys() {
- NamespaceDetails *d = nsdetails(ns.c_str());
- for( int i = 0; i < d->nIndexes; i++ ) {
+ NamespaceDetails *d = nsdetails(ns.c_str());
+ for ( int i = 0; i < d->nIndexes; i++ ) {
// set<string> fields;
d->indexes[i].keyPattern().getFieldNames(allIndexKeys);
// allIndexKeys.insert(fields.begin(),fields.end());
@@ -352,24 +355,24 @@ void NamespaceDetailsTransient::computeIndexKeys() {
options: { capped : ..., size : ... }
*/
void addNewNamespaceToCatalog(const char *ns, BSONObj *options = 0) {
- OCCASIONALLY log() << "New namespace: " << ns << '\n';
- if( strstr(ns, "system.namespaces") ) {
- // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
- // TODO: fix above should not be strstr!
- return;
- }
-
- {
- BSONObjBuilder b;
- b.append("name", ns);
- if( options )
- b.append("options", *options);
- BSONObj j = b.done();
- char database[256];
- nsToClient(ns, database);
- string s = database;
- s += ".system.namespaces";
- theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), true);
- }
+ OCCASIONALLY log() << "New namespace: " << ns << '\n';
+ if ( strstr(ns, "system.namespaces") ) {
+ // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
+        // TODO: fix the check above -- it should not use strstr!
+ return;
+ }
+
+ {
+ BSONObjBuilder b;
+ b.append("name", ns);
+ if ( options )
+ b.append("options", *options);
+ BSONObj j = b.done();
+ char database[256];
+ nsToClient(ns, database);
+ string s = database;
+ s += ".system.namespaces";
+ theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), true);
+ }
}
diff --git a/db/namespace.h b/db/namespace.h
index 9993b0b1afa..11eda939705 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -32,107 +32,112 @@ class Cursor;
class Namespace {
public:
- Namespace(const char *ns) {
- *this = ns;
- }
- Namespace& operator=(const char *ns) {
- memset(buf, 0, 128); /* this is just to keep stuff clean in the files for easy dumping and reading */
- strcpy_s(buf, 128, ns); return *this;
- }
-
- void kill() {
- buf[0] = 0x7f;
- }
-
- bool operator==(const char *r) { return strcmp(buf, r) == 0; }
- bool operator==(const Namespace& r) { return strcmp(buf, r.buf) == 0; }
- int hash() const {
- unsigned x = 0;
- const char *p = buf;
- while( *p ) {
- x = x * 131 + *p;
- p++;
- }
- return (x & 0x7fffffff) | 0x8000000; // must be > 0
- }
-
- char buf[128];
+ Namespace(const char *ns) {
+ *this = ns;
+ }
+ Namespace& operator=(const char *ns) {
+ memset(buf, 0, 128); /* this is just to keep stuff clean in the files for easy dumping and reading */
+ strcpy_s(buf, 128, ns);
+ return *this;
+ }
+
+ void kill() {
+ buf[0] = 0x7f;
+ }
+
+ bool operator==(const char *r) {
+ return strcmp(buf, r) == 0;
+ }
+ bool operator==(const Namespace& r) {
+ return strcmp(buf, r.buf) == 0;
+ }
+ int hash() const {
+ unsigned x = 0;
+ const char *p = buf;
+ while ( *p ) {
+ x = x * 131 + *p;
+ p++;
+ }
+ return (x & 0x7fffffff) | 0x8000000; // must be > 0
+ }
+
+ char buf[128];
};
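hash() above is a base-131 rolling hash over the name: the mask clears the sign bit and the OR of 0x8000000 (bit 27) keeps the value non-zero, so the result is always > 0 as the hash table requires. A standalone copy with a made-up name:

#include <cassert>
#include <iostream>

// Same computation as Namespace::hash() above.
int nsHash(const char *buf) {
    unsigned x = 0;
    for ( const char *p = buf; *p; p++ )
        x = x * 131 + *p;
    return (x & 0x7fffffff) | 0x8000000;
}

int main() {
    int h = nsHash("test.foo");
    assert( h > 0 );                       // the hash table relies on hash() > 0
    std::cout << std::hex << h << std::endl;
}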
const int Buckets = 19;
const int MaxBucket = 18;
const int MaxIndexes = 10;
-class IndexDetails {
+class IndexDetails {
public:
- DiskLoc head; /* btree head */
+ DiskLoc head; /* btree head */
- /* Location of index info object. Format:
+ /* Location of index info object. Format:
- { name:"nameofindex", ns:"parentnsname", key: {keypattobject} }
+ { name:"nameofindex", ns:"parentnsname", key: {keypattobject} }
- This object is in the system.indexes collection. Note that since we
- have a pointer to the object here, the object in system.indexes must
- never move.
- */
- DiskLoc info;
+ This object is in the system.indexes collection. Note that since we
+ have a pointer to the object here, the object in system.indexes must
+ never move.
+ */
+ DiskLoc info;
- /* extract key value from the query object
+ /* extract key value from the query object
e.g., if key() == { x : 1 },
{ x : 70, y : 3 } -> { x : 70 }
handles our embedded dot notation too.
*/
- BSONObj getKeyFromQuery(BSONObj& query) {
+ BSONObj getKeyFromQuery(BSONObj& query) {
BSONObj k = keyPattern();
BSONObj res = query.extractFieldsUnDotted(k);
assert(res.objsize() != 0); // guard against a seg fault if details is 0
return res;
}
- /* pull out the relevant key objects from obj, so we
- can index them. Note that the set is multiple elements
- only when it's a "multikey" array.
+ /* pull out the relevant key objects from obj, so we
+ can index them. Note that the set is multiple elements
+ only when it's a "multikey" array.
keys will be left empty if key not found in the object.
- */
- void getKeysFromObject( const BSONObj& obj, set<BSONObj>& keys) const;
+ */
+ void getKeysFromObject( const BSONObj& obj, set<BSONObj>& keys) const;
- /* get the key pattern for this object.
+ /* get the key pattern for this object.
e.g., { lastname:1, firstname:1 }
*/
- BSONObj keyPattern() const {
+ BSONObj keyPattern() const {
return info.obj().getObjectField("key");
}
// returns name of this index's storage area
- // database.table.$index
- string indexNamespace() {
- BSONObj io = info.obj();
- string s;
- s.reserve(128);
- s = io.getStringField("ns");
- assert( !s.empty() );
- s += ".$";
- s += io.getStringField("name");
- return s;
- }
-
- string indexName() const { // e.g. "ts_1"
- BSONObj io = info.obj();
- return io.getStringField("name");
- }
-
- /* gets not our namespace name (indexNamespace for that),
- but the collection we index, its name.
- */
- string parentNS() const {
- BSONObj io = info.obj();
- return io.getStringField("ns");
- }
-
- /* delete this index. does NOT celan up the system catalog
- (system.indexes or system.namespaces) -- only NamespaceIndex.
- */
- void kill();
+ // database.table.$index
+ string indexNamespace() {
+ BSONObj io = info.obj();
+ string s;
+ s.reserve(128);
+ s = io.getStringField("ns");
+ assert( !s.empty() );
+ s += ".$";
+ s += io.getStringField("name");
+ return s;
+ }
+
+ string indexName() const { // e.g. "ts_1"
+ BSONObj io = info.obj();
+ return io.getStringField("name");
+ }
+
+    /* returns the name of the collection this index is on
+       (not the index's own namespace -- see indexNamespace() for that).
+ */
+ string parentNS() const {
+ BSONObj io = info.obj();
+ return io.getStringField("ns");
+ }
+
+    /* delete this index. does NOT clean up the system catalog
+ (system.indexes or system.namespaces) -- only NamespaceIndex.
+ */
+ void kill();
};
extern int bucketSizes[];
@@ -141,88 +146,90 @@ extern int bucketSizes[];
*/
class NamespaceDetails {
public:
- NamespaceDetails() {
- /* be sure to initialize new fields here -- doesn't default to zeroes the way we use it */
- datasize = nrecords = 0;
- lastExtentSize = 0;
- nIndexes = 0;
- capped = 0;
- max = 0x7fffffff;
- paddingFactor = 1.0;
- flags = 0;
- memset(reserved, 0, sizeof(reserved));
- }
- DiskLoc firstExtent;
- DiskLoc lastExtent;
- DiskLoc deletedList[Buckets];
- long long datasize;
- long long nrecords;
- int lastExtentSize;
- int nIndexes;
- IndexDetails indexes[MaxIndexes];
- int capped;
- int max; // max # of objects for a capped table.
- double paddingFactor; // 1.0 = no padding.
- int flags;
- char reserved[256-16-4-4-8*MaxIndexes-8-8-8-4];
-
- enum {
- Flag_HaveIdIndex = 1 // set when we have _id index (ONLY if ensureIdIndex was called -- 0 if that has never been called)
- };
-
- /* you MUST call when adding an index. see pdfile.cpp */
- void addingIndex(const char *thisns, IndexDetails& details);
-
- void aboutToDeleteAnIndex() { flags &= ~Flag_HaveIdIndex; }
-
- /* returns index of the first index in which the field is present. -1 if not present. */
- int fieldIsIndexed(const char *fieldName);
-
- void paddingFits() {
- double x = paddingFactor - 0.01;
- if( x >= 1.0 )
- paddingFactor = x;
- }
- void paddingTooSmall() {
- double x = paddingFactor + 0.6;
- if( x <= 2.0 )
- paddingFactor = x;
- }
-
- //returns offset in indexes[]
- int findIndexByName(const char *name) {
- for( int i = 0; i < nIndexes; i++ ) {
- if( strcmp(indexes[i].info.obj().getStringField("name"),name) == 0 )
- return i;
- }
- return -1;
- }
-
- /* return which "deleted bucket" for this size object */
- static int bucket(int n) {
- for( int i = 0; i < Buckets; i++ )
- if( bucketSizes[i] > n )
- return i;
- return Buckets-1;
- }
-
- /* allocate a new record. lenToAlloc includes headers. */
- DiskLoc alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc);
-
- /* add a given record to the deleted chains for this NS */
- void addDeletedRec(DeletedRecord *d, DiskLoc dloc);
-
- void dumpDeleted(set<DiskLoc> *extents = 0);
+ NamespaceDetails() {
+ /* be sure to initialize new fields here -- doesn't default to zeroes the way we use it */
+ datasize = nrecords = 0;
+ lastExtentSize = 0;
+ nIndexes = 0;
+ capped = 0;
+ max = 0x7fffffff;
+ paddingFactor = 1.0;
+ flags = 0;
+ memset(reserved, 0, sizeof(reserved));
+ }
+ DiskLoc firstExtent;
+ DiskLoc lastExtent;
+ DiskLoc deletedList[Buckets];
+ long long datasize;
+ long long nrecords;
+ int lastExtentSize;
+ int nIndexes;
+ IndexDetails indexes[MaxIndexes];
+ int capped;
+ int max; // max # of objects for a capped table.
+ double paddingFactor; // 1.0 = no padding.
+ int flags;
+ char reserved[256-16-4-4-8*MaxIndexes-8-8-8-4];
+
+ enum {
+ Flag_HaveIdIndex = 1 // set when we have _id index (ONLY if ensureIdIndex was called -- 0 if that has never been called)
+ };
+
+ /* you MUST call when adding an index. see pdfile.cpp */
+ void addingIndex(const char *thisns, IndexDetails& details);
+
+ void aboutToDeleteAnIndex() {
+ flags &= ~Flag_HaveIdIndex;
+ }
+
+ /* returns index of the first index in which the field is present. -1 if not present. */
+ int fieldIsIndexed(const char *fieldName);
+
+ void paddingFits() {
+ double x = paddingFactor - 0.01;
+ if ( x >= 1.0 )
+ paddingFactor = x;
+ }
+ void paddingTooSmall() {
+ double x = paddingFactor + 0.6;
+ if ( x <= 2.0 )
+ paddingFactor = x;
+ }
+
+ //returns offset in indexes[]
+ int findIndexByName(const char *name) {
+ for ( int i = 0; i < nIndexes; i++ ) {
+ if ( strcmp(indexes[i].info.obj().getStringField("name"),name) == 0 )
+ return i;
+ }
+ return -1;
+ }
+
+ /* return which "deleted bucket" for this size object */
+ static int bucket(int n) {
+ for ( int i = 0; i < Buckets; i++ )
+ if ( bucketSizes[i] > n )
+ return i;
+ return Buckets-1;
+ }
+
+ /* allocate a new record. lenToAlloc includes headers. */
+ DiskLoc alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc);
+
+ /* add a given record to the deleted chains for this NS */
+ void addDeletedRec(DeletedRecord *d, DiskLoc dloc);
+
+ void dumpDeleted(set<DiskLoc> *extents = 0);
private:
- DiskLoc __stdAlloc(int len);
- DiskLoc _alloc(const char *ns, int len);
- void compact();
+ DiskLoc __stdAlloc(int len);
+ DiskLoc _alloc(const char *ns, int len);
+ void compact();
};
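paddingFits()/paddingTooSmall() above implement a slow-down/fast-up adaptive padding factor clamped to [1.0, 2.0]. A sketch of how the factor evolves; the names are made up, and the final line assumes the factor simply multiplies the requested record size at allocation time (that use lives in pdfile.cpp and is not part of this hunk):

#include <iostream>

// Adaptive padding: each in-place update that fits shaves 0.01 off the
// factor, each one that forces a move adds 0.6, clamped to [1.0, 2.0].
struct Padding {
    double factor;
    Padding() : factor(1.0) { }
    void fits()     { double x = factor - 0.01; if ( x >= 1.0 ) factor = x; }
    void tooSmall() { double x = factor + 0.6;  if ( x <= 2.0 ) factor = x; }
};

int main() {
    Padding p;
    p.tooSmall();                    // one record didn't fit in place -> 1.6
    for ( int i = 0; i < 10; i++ )   // ten comfortable in-place updates -> 1.5
        p.fits();
    std::cout << p.factor << std::endl;               // 1.5
    std::cout << (int)(500 * p.factor) << std::endl;  // ~750 bytes for a 500-byte record (assumed use)
}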
#pragma pack(pop)
-/* these are things we know / compute about a namespace that are transient -- things
- we don't actually store in the .ns file. so mainly caching of frequently used
+/* these are things we know / compute about a namespace that are transient -- things
+ we don't actually store in the .ns file. so mainly caching of frequently used
information.
CAUTION: Are you maintaining this properly on a collection drop()? A dropdatabase()? Be careful.
@@ -230,92 +237,99 @@ private:
as currently used that does not cause anything terrible to happen.
*/
class NamespaceDetailsTransient : boost::noncopyable {
- string ns;
- bool haveIndexKeys;
- set<string> allIndexKeys;
- void computeIndexKeys();
+ string ns;
+ bool haveIndexKeys;
+ set<string> allIndexKeys;
+ void computeIndexKeys();
public:
- NamespaceDetailsTransient(const char *_ns) : ns(_ns) { haveIndexKeys=false; /*lazy load them*/ }
+ NamespaceDetailsTransient(const char *_ns) : ns(_ns) {
+ haveIndexKeys=false; /*lazy load them*/
+ }
- /* get set of index keys for this namespace. handy to quickly check if a given
- field is indexed (Note it might be a seconary component of a compound index.)
- */
- set<string>& indexKeys() {
- if( !haveIndexKeys ) { haveIndexKeys=true; computeIndexKeys(); }
- return allIndexKeys;
- }
+ /* get set of index keys for this namespace. handy to quickly check if a given
+       field is indexed (Note it might be a secondary component of a compound index.)
+ */
+ set<string>& indexKeys() {
+ if ( !haveIndexKeys ) {
+ haveIndexKeys=true;
+ computeIndexKeys();
+ }
+ return allIndexKeys;
+ }
- void addedIndex() { haveIndexKeys=false; }
+ void addedIndex() {
+ haveIndexKeys=false;
+ }
private:
- static std::map<const char *,NamespaceDetailsTransient*> map;
+ static std::map<const char *,NamespaceDetailsTransient*> map;
public:
- static NamespaceDetailsTransient& get(const char *ns);
+ static NamespaceDetailsTransient& get(const char *ns);
};
-/* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog"
+/* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog"
if you will: at least the core parts. (Additional info in system.* collections.)
*/
class NamespaceIndex {
- friend class NamespaceCursor;
+ friend class NamespaceCursor;
public:
- NamespaceIndex() { }
-
- /* returns true if we created (did not exist) during init() */
- bool init(const char *dir, const char *database);
-
- void add(const char *ns, DiskLoc& loc) {
- Namespace n(ns);
- NamespaceDetails details;
- details.lastExtent = details.firstExtent = loc;
- ht->put(n, details);
- }
-
- /* just for diagnostics */
- size_t detailsOffset(NamespaceDetails *d) {
- return ((char *) d) - (char *) ht->nodes;
- }
-
- NamespaceDetails* details(const char *ns) {
- Namespace n(ns);
- return ht->get(n);
- }
-
- void kill(const char *ns) {
- Namespace n(ns);
- ht->kill(n);
- }
-
- bool find(const char *ns, DiskLoc& loc) {
- NamespaceDetails *l = details(ns);
- if( l ) {
- loc = l->firstExtent;
- return true;
- }
- return false;
- }
+ NamespaceIndex() { }
+
+ /* returns true if we created (did not exist) during init() */
+ bool init(const char *dir, const char *database);
+
+ void add(const char *ns, DiskLoc& loc) {
+ Namespace n(ns);
+ NamespaceDetails details;
+ details.lastExtent = details.firstExtent = loc;
+ ht->put(n, details);
+ }
+
+ /* just for diagnostics */
+ size_t detailsOffset(NamespaceDetails *d) {
+ return ((char *) d) - (char *) ht->nodes;
+ }
+
+ NamespaceDetails* details(const char *ns) {
+ Namespace n(ns);
+ return ht->get(n);
+ }
+
+ void kill(const char *ns) {
+ Namespace n(ns);
+ ht->kill(n);
+ }
+
+ bool find(const char *ns, DiskLoc& loc) {
+ NamespaceDetails *l = details(ns);
+ if ( l ) {
+ loc = l->firstExtent;
+ return true;
+ }
+ return false;
+ }
private:
- MemoryMappedFile f;
- HashTable<Namespace,NamespaceDetails> *ht;
+ MemoryMappedFile f;
+ HashTable<Namespace,NamespaceDetails> *ht;
};
extern const char *dbpath;
// "database.a.b.c" -> "database"
const int MaxClientLen = 256;
-inline void nsToClient(const char *ns, char *database) {
- const char *p = ns;
- char *q = database;
- while( *p != '.' ) {
- if( *p == 0 )
+inline void nsToClient(const char *ns, char *database) {
+ const char *p = ns;
+ char *q = database;
+ while ( *p != '.' ) {
+ if ( *p == 0 )
break;
- *q++ = *p++;
- }
- *q = 0;
- if(q-database>=MaxClientLen) {
- problem() << "nsToClient: ns too long. terminating, buf overrun condition" << endl;
- dbexit(60);
- }
+ *q++ = *p++;
+ }
+ *q = 0;
+ if (q-database>=MaxClientLen) {
+ problem() << "nsToClient: ns too long. terminating, buf overrun condition" << endl;
+ dbexit(60);
+ }
}
inline string nsToClient(const char *ns) {
char buf[MaxClientLen];
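nsToClient() just copies everything before the first '.' (with an overrun guard). The same rule with std::string, as a short sketch with a made-up name:

#include <iostream>
#include <string>

// The client/database name is everything before the first '.'.
std::string nsToClientStr(const std::string& ns) {
    std::string::size_type dot = ns.find('.');
    return dot == std::string::npos ? ns : ns.substr(0, dot);
}

int main() {
    std::cout << nsToClientStr("database.a.b.c") << '\n';  // database
    std::cout << nsToClientStr("local") << '\n';           // local (no '.' at all)
}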
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 1f3f959c8db..10b2289af96 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -2,22 +2,22 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/*
-todo:
+/*
+todo:
_ table scans must be sequential, not next/prev pointers
_ coalesce deleted
@@ -51,11 +51,11 @@ int callDepth = 0;
extern int otherTraceLevel;
void addNewNamespaceToCatalog(const char *ns, BSONObj *options = 0);
-string getDbContext() {
+string getDbContext() {
stringstream ss;
- if( database ) {
+ if ( database ) {
ss << database->name << ' ';
- if( curNs )
+ if ( curNs )
ss << curNs << ' ';
}
ss<< "op:" << curOp << ' ' << callDepth;
@@ -65,215 +65,217 @@ string getDbContext() {
/* this is a good place to set a breakpoint when debugging, as lots of warning things
(assert, wassert) call it.
*/
-void sayDbContext(const char *errmsg) {
- if( errmsg ) {
- problem() << errmsg << endl;
- }
+void sayDbContext(const char *errmsg) {
+ if ( errmsg ) {
+ problem() << errmsg << endl;
+ }
log() << ' ' << getDbContext() << '\n';
- printStackTrace();
+ printStackTrace();
}
-BSONObj::BSONObj(Record *r) {
- init(r->data, false);
-/*
- _objdata = r->data;
- _objsize = *((int*) _objdata);
- if( _objsize > r->netLength() ) {
- cout << "About to assert fail _objsize <= r->netLength()" << endl;
- cout << " _objsize: " << _objsize << endl;
- cout << " netLength(): " << r->netLength() << endl;
- cout << " extentOfs: " << r->extentOfs << endl;
- cout << " nextOfs: " << r->nextOfs << endl;
- cout << " prevOfs: " << r->prevOfs << endl;
- assert( _objsize <= r->netLength() );
- }
- iFree = false;
-*/
+BSONObj::BSONObj(Record *r) {
+ init(r->data, false);
+ /*
+ _objdata = r->data;
+ _objsize = *((int*) _objdata);
+ if( _objsize > r->netLength() ) {
+ cout << "About to assert fail _objsize <= r->netLength()" << endl;
+ cout << " _objsize: " << _objsize << endl;
+ cout << " netLength(): " << r->netLength() << endl;
+ cout << " extentOfs: " << r->extentOfs << endl;
+ cout << " nextOfs: " << r->nextOfs << endl;
+ cout << " prevOfs: " << r->prevOfs << endl;
+ assert( _objsize <= r->netLength() );
+ }
+ iFree = false;
+ */
}
-/*---------------------------------------------------------------------*/
-
-int initialExtentSize(int len) {
- long long sz = len * 16;
- if( len < 1000 ) sz = len * 64;
- if( sz > 1000000000 )
- sz = 1000000000;
- int z = ((int)sz) & 0xffffff00;
- assert( z > len );
- DEV log() << "initialExtentSize(" << len << ") returns " << z << endl;
- return z;
+/*---------------------------------------------------------------------*/
+
+int initialExtentSize(int len) {
+ long long sz = len * 16;
+ if ( len < 1000 ) sz = len * 64;
+ if ( sz > 1000000000 )
+ sz = 1000000000;
+ int z = ((int)sz) & 0xffffff00;
+ assert( z > len );
+ DEV log() << "initialExtentSize(" << len << ") returns " << z << endl;
+ return z;
}
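A standalone copy of initialExtentSize() with a few worked values, to make the sizing rule concrete: small records get 64x their length, larger ones 16x, capped near 1GB, and the result is rounded down to a multiple of 256 bytes.

#include <iostream>

// Same computation as initialExtentSize() above (assert and logging omitted).
int initialExtentSizeSketch(int len) {
    long long sz = len * 16;
    if ( len < 1000 ) sz = len * 64;
    if ( sz > 1000000000 )
        sz = 1000000000;
    return ((int) sz) & 0xffffff00;
}

int main() {
    std::cout << initialExtentSizeSketch(500) << '\n';       // 32000      (500 * 64)
    std::cout << initialExtentSizeSketch(5000) << '\n';      // 79872      (5000 * 16, rounded down)
    std::cout << initialExtentSizeSketch(100000000) << '\n'; // 1000000000 (hits the cap)
}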
-bool _userCreateNS(const char *ns, BSONObj& j, string& err) {
- if( nsdetails(ns) ) {
- err = "collection already exists";
- return false;
- }
+bool _userCreateNS(const char *ns, BSONObj& j, string& err) {
+ if ( nsdetails(ns) ) {
+ err = "collection already exists";
+ return false;
+ }
- log() << "create collection " << ns << ' ' << j.toString() << endl;
+ log() << "create collection " << ns << ' ' << j.toString() << endl;
- /* todo: do this only when we have allocated space successfully? or we could insert with a { ok: 0 } field
+ /* todo: do this only when we have allocated space successfully? or we could insert with a { ok: 0 } field
and then go back and set to ok : 1 after we are done.
- */
+ */
addNewNamespaceToCatalog(ns, j.isEmpty() ? 0 : &j);
- int ies = initialExtentSize(128);
- BSONElement e = j.findElement("size");
- if( e.isNumber() ) {
- ies = (int) e.number();
- ies += 256;
- ies &= 0xffffff00;
- if( ies > 1024 * 1024 * 1024 + 256 ) return false;
- }
+ int ies = initialExtentSize(128);
+ BSONElement e = j.findElement("size");
+ if ( e.isNumber() ) {
+ ies = (int) e.number();
+ ies += 256;
+ ies &= 0xffffff00;
+ if ( ies > 1024 * 1024 * 1024 + 256 ) return false;
+ }
- database->suitableFile(ies)->newExtent(ns, ies);
- NamespaceDetails *d = nsdetails(ns);
- assert(d);
-
- e = j.findElement("capped");
- if( e.type() == Bool && e.boolean() ) {
- d->capped = 1;
- e = j.findElement("max");
- if( e.isNumber() ) {
- int mx = (int) e.number();
- if( mx > 0 )
- d->max = mx;
- }
- }
+ database->suitableFile(ies)->newExtent(ns, ies);
+ NamespaceDetails *d = nsdetails(ns);
+ assert(d);
+
+ e = j.findElement("capped");
+ if ( e.type() == Bool && e.boolean() ) {
+ d->capped = 1;
+ e = j.findElement("max");
+ if ( e.isNumber() ) {
+ int mx = (int) e.number();
+ if ( mx > 0 )
+ d->max = mx;
+ }
+ }
- return true;
+ return true;
}
// { ..., capped: true, size: ..., max: ... }
// returns true if successful
-bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication) {
+bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication) {
j.validateEmpty();
bool ok = _userCreateNS(ns, j, err);
- if( logForReplication && ok )
+ if ( logForReplication && ok )
logOp("c", ns, j);
return ok;
}
-/*---------------------------------------------------------------------*/
+/*---------------------------------------------------------------------*/
void PhysicalDataFile::open(int fn, const char *filename) {
{
- /* check quotas
- very simple temporary implementation - we will in future look up
+ /* check quotas
+ very simple temporary implementation - we will in future look up
the quota from the grid database
*/
- if( quota && fn > 8 && !boost::filesystem::exists(filename) ) {
- /* todo: if we were adding / changing keys in an index did we do some
- work previously that needs cleaning up? Possible. We should
- check code like that and have it catch the exception and do
+ if ( quota && fn > 8 && !boost::filesystem::exists(filename) ) {
+ /* todo: if we were adding / changing keys in an index did we do some
+ work previously that needs cleaning up? Possible. We should
+ check code like that and have it catch the exception and do
something reasonable.
*/
string s = "db disk space quota exceeded ";
- if( database )
+ if ( database )
s += database->name;
uasserted(s.c_str());
}
}
- int length;
-
- if( fn <= 4 ) {
- length = (64*1024*1024) << fn;
- if( strstr(filename, "alleyinsider") && length < 1024 * 1024 * 1024 ) {
- DEV cout << "Warning: not making alleyinsider datafile bigger because DEV is true" << endl;
- else
- length = 1024 * 1024 * 1024;
- }
- } else
- length = 0x7ff00000;
-
+ int length;
+
+ if ( fn <= 4 ) {
+ length = (64*1024*1024) << fn;
+ if ( strstr(filename, "alleyinsider") && length < 1024 * 1024 * 1024 ) {
+ DEV cout << "Warning: not making alleyinsider datafile bigger because DEV is true" << endl;
+ else
+ length = 1024 * 1024 * 1024;
+ }
+ } else
+ length = 0x7ff00000;
+
if ( sizeof( int* ) == 4 && fn > 4 )
- length = 512 * 1024 * 1024;
-
- assert( length >= 64*1024*1024 );
-
- if( strstr(filename, "_hudsonSmall") ) {
- int mult = 1;
- if ( fn > 1 && fn < 1000 )
- mult = fn;
- length = 1024 * 512 * mult;
- log() << "Warning : using small files for _hudsonSmall" << endl;
- }
- assert( length % 4096 == 0 );
+ length = 512 * 1024 * 1024;
+
+ assert( length >= 64*1024*1024 );
+
+ if ( strstr(filename, "_hudsonSmall") ) {
+ int mult = 1;
+ if ( fn > 1 && fn < 1000 )
+ mult = fn;
+ length = 1024 * 512 * mult;
+ log() << "Warning : using small files for _hudsonSmall" << endl;
+ }
+ assert( length % 4096 == 0 );
- assert(fn == fileNo);
- header = (PDFHeader *) mmf.map(filename, length);
- uassert("can't map file memory", header);
- header->init(fileNo, length);
+ assert(fn == fileNo);
+ header = (PDFHeader *) mmf.map(filename, length);
+ uassert("can't map file memory", header);
+ header->init(fileNo, length);
}
/* prev - previous extent for this namespace. null=this is the first one. */
Extent* PhysicalDataFile::newExtent(const char *ns, int approxSize, int loops) {
- assert( approxSize >= 0 && approxSize <= 0x7ff00000 );
-
- assert( header ); // null if file open failed
- int ExtentSize = approxSize <= header->unusedLength ? approxSize : header->unusedLength;
- DiskLoc loc;
- if( ExtentSize <= 0 ) {
- /* not there could be a lot of looping here is db just started and
- no files are open yet. we might want to do something about that. */
- if( loops > 8 ) {
- assert( loops < 10000 );
- cout << "warning: loops=" << loops << " fileno:" << fileNo << ' ' << ns << '\n';
- }
- log() << "newExtent: " << ns << " file " << fileNo << " full, adding a new file\n";
- return database->addAFile()->newExtent(ns, approxSize, loops+1);
- }
- int offset = header->unused.getOfs();
- header->unused.setOfs( fileNo, offset + ExtentSize );
- header->unusedLength -= ExtentSize;
- loc.setOfs(fileNo, offset);
- Extent *e = _getExtent(loc);
- DiskLoc emptyLoc = e->init(ns, ExtentSize, fileNo, offset);
-
- DiskLoc oldExtentLoc;
- NamespaceIndex *ni = nsindex(ns);
- NamespaceDetails *details = ni->details(ns);
- if( details ) {
- assert( !details->firstExtent.isNull() );
- e->xprev = details->lastExtent;
- details->lastExtent.ext()->xnext = loc;
- details->lastExtent = loc;
- }
- else {
- ni->add(ns, loc);
- details = ni->details(ns);
- }
+ assert( approxSize >= 0 && approxSize <= 0x7ff00000 );
+
+ assert( header ); // null if file open failed
+ int ExtentSize = approxSize <= header->unusedLength ? approxSize : header->unusedLength;
+ DiskLoc loc;
+ if ( ExtentSize <= 0 ) {
+        /* note there could be a lot of looping here if the db just started and
+ no files are open yet. we might want to do something about that. */
+ if ( loops > 8 ) {
+ assert( loops < 10000 );
+ cout << "warning: loops=" << loops << " fileno:" << fileNo << ' ' << ns << '\n';
+ }
+ log() << "newExtent: " << ns << " file " << fileNo << " full, adding a new file\n";
+ return database->addAFile()->newExtent(ns, approxSize, loops+1);
+ }
+ int offset = header->unused.getOfs();
+ header->unused.setOfs( fileNo, offset + ExtentSize );
+ header->unusedLength -= ExtentSize;
+ loc.setOfs(fileNo, offset);
+ Extent *e = _getExtent(loc);
+ DiskLoc emptyLoc = e->init(ns, ExtentSize, fileNo, offset);
+
+ DiskLoc oldExtentLoc;
+ NamespaceIndex *ni = nsindex(ns);
+ NamespaceDetails *details = ni->details(ns);
+ if ( details ) {
+ assert( !details->firstExtent.isNull() );
+ e->xprev = details->lastExtent;
+ details->lastExtent.ext()->xnext = loc;
+ details->lastExtent = loc;
+ }
+ else {
+ ni->add(ns, loc);
+ details = ni->details(ns);
+ }
- details->lastExtentSize = approxSize;
- DEBUGGING cout << "temp: newextent adddelrec " << ns << endl;
- details->addDeletedRec(emptyLoc.drec(), emptyLoc);
+ details->lastExtentSize = approxSize;
+ DEBUGGING cout << "temp: newextent adddelrec " << ns << endl;
+ details->addDeletedRec(emptyLoc.drec(), emptyLoc);
- DEV log() << "new extent " << ns << " size: 0x" << hex << ExtentSize << " loc: 0x" << hex << offset
- << " emptyLoc:" << hex << emptyLoc.getOfs() << dec << endl;
- return e;
+ DEV log() << "new extent " << ns << " size: 0x" << hex << ExtentSize << " loc: 0x" << hex << offset
+ << " emptyLoc:" << hex << emptyLoc.getOfs() << dec << endl;
+ return e;
}
-/*---------------------------------------------------------------------*/
+/*---------------------------------------------------------------------*/
/* assumes already zeroed -- insufficient for block 'reuse' perhaps */
-DiskLoc Extent::init(const char *nsname, int _length, int _fileNo, int _offset) {
- magic = 0x41424344;
- myLoc.setOfs(_fileNo, _offset);
- xnext.Null(); xprev.Null();
- ns = nsname;
- length = _length;
- firstRecord.Null(); lastRecord.Null();
-
- DiskLoc emptyLoc = myLoc;
- emptyLoc.inc( (extentData-(char*)this) );
-
- DeletedRecord *empty1 = (DeletedRecord *) extentData;
- DeletedRecord *empty = (DeletedRecord *) getRecord(emptyLoc);
- assert( empty == empty1 );
- empty->lengthWithHeaders = _length - (extentData - (char *) this);
- empty->extentOfs = myLoc.getOfs();
- return emptyLoc;
+DiskLoc Extent::init(const char *nsname, int _length, int _fileNo, int _offset) {
+ magic = 0x41424344;
+ myLoc.setOfs(_fileNo, _offset);
+ xnext.Null();
+ xprev.Null();
+ ns = nsname;
+ length = _length;
+ firstRecord.Null();
+ lastRecord.Null();
+
+ DiskLoc emptyLoc = myLoc;
+ emptyLoc.inc( (extentData-(char*)this) );
+
+ DeletedRecord *empty1 = (DeletedRecord *) extentData;
+ DeletedRecord *empty = (DeletedRecord *) getRecord(emptyLoc);
+ assert( empty == empty1 );
+ empty->lengthWithHeaders = _length - (extentData - (char *) this);
+ empty->extentOfs = myLoc.getOfs();
+ return emptyLoc;
}
/*
@@ -307,7 +309,7 @@ Record* Extent::newRecord(int len) {
}
lastRecord = newRecordLoc;
- if( left < Record::HeaderSize + 32 ) {
+ if( left < Record::HeaderSize + 32 ) {
firstEmptyRegion.Null();
}
else {
@@ -322,743 +324,747 @@ Record* Extent::newRecord(int len) {
}
*/
-/*---------------------------------------------------------------------*/
+/*---------------------------------------------------------------------*/
auto_ptr<Cursor> DataFileMgr::findAll(const char *ns) {
- DiskLoc loc;
- bool found = nsindex(ns)->find(ns, loc);
- if( !found ) {
- // cout << "info: findAll() namespace does not exist: " << ns << endl;
- return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
- }
+ DiskLoc loc;
+ bool found = nsindex(ns)->find(ns, loc);
+ if ( !found ) {
+ // cout << "info: findAll() namespace does not exist: " << ns << endl;
+ return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
+ }
- Extent *e = getExtent(loc);
-
- DEBUGGING {
- cout << "listing extents for " << ns << endl;
- DiskLoc tmp = loc;
- set<DiskLoc> extents;
-
- while( 1 ) {
- Extent *f = getExtent(tmp);
- cout << "extent: " << tmp.toString() << endl;
- extents.insert(tmp);
- tmp = f->xnext;
- if( tmp.isNull() )
- break;
- f = f->getNextExtent();
- }
-
- cout << endl;
- nsdetails(ns)->dumpDeleted(&extents);
- }
+ Extent *e = getExtent(loc);
+
+ DEBUGGING {
+ cout << "listing extents for " << ns << endl;
+ DiskLoc tmp = loc;
+ set<DiskLoc> extents;
+
+ while ( 1 ) {
+ Extent *f = getExtent(tmp);
+ cout << "extent: " << tmp.toString() << endl;
+ extents.insert(tmp);
+ tmp = f->xnext;
+ if ( tmp.isNull() )
+ break;
+ f = f->getNextExtent();
+ }
- while( e->firstRecord.isNull() && !e->xnext.isNull() ) {
- /* todo: if extent is empty, free it for reuse elsewhere.
- that is a bit complicated have to clean up the freelists.
- */
- RARELY cout << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead " << ns << endl;
- // find a nonempty extent
- // it might be nice to free the whole extent here! but have to clean up free recs then.
- e = e->getNextExtent();
- }
- return auto_ptr<Cursor>(new BasicCursor( e->firstRecord ));
+ cout << endl;
+ nsdetails(ns)->dumpDeleted(&extents);
+ }
+
+ while ( e->firstRecord.isNull() && !e->xnext.isNull() ) {
+ /* todo: if extent is empty, free it for reuse elsewhere.
+           that is a bit complicated: we'd have to clean up the freelists.
+ */
+ RARELY cout << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead " << ns << endl;
+ // find a nonempty extent
+ // it might be nice to free the whole extent here! but have to clean up free recs then.
+ e = e->getNextExtent();
+ }
+ return auto_ptr<Cursor>(new BasicCursor( e->firstRecord ));
}
/* get a table scan cursor, but can be forward or reverse direction.
order.$natural - if set, > 0 means forward (asc), < 0 backward (desc).
*/
auto_ptr<Cursor> findTableScan(const char *ns, BSONObj& order, bool *isSorted) {
- BSONElement el = order.findElement("$natural"); // e.g., { $natural : -1 }
- if( !el.eoo() && isSorted )
+ BSONElement el = order.findElement("$natural"); // e.g., { $natural : -1 }
+ if ( !el.eoo() && isSorted )
*isSorted = true;
- if( el.number() >= 0 )
- return DataFileMgr::findAll(ns);
-
- // "reverse natural order"
- NamespaceDetails *d = nsdetails(ns);
- if( !d )
- return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
- Extent *e = d->lastExtent.ext();
- while( e->lastRecord.isNull() && !e->xprev.isNull() ) {
- OCCASIONALLY cout << " findTableScan: extent empty, skipping ahead" << endl;
- e = e->getPrevExtent();
- }
- return auto_ptr<Cursor>(new ReverseCursor( e->lastRecord ));
+ if ( el.number() >= 0 )
+ return DataFileMgr::findAll(ns);
+
+ // "reverse natural order"
+ NamespaceDetails *d = nsdetails(ns);
+ if ( !d )
+ return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
+ Extent *e = d->lastExtent.ext();
+ while ( e->lastRecord.isNull() && !e->xprev.isNull() ) {
+ OCCASIONALLY cout << " findTableScan: extent empty, skipping ahead" << endl;
+ e = e->getPrevExtent();
+ }
+ return auto_ptr<Cursor>(new ReverseCursor( e->lastRecord ));
}
void aboutToDelete(const DiskLoc& dl);
/* drop a collection/namespace */
void dropNS(string& nsToDrop) {
- assert( strstr(nsToDrop.c_str(), ".system.") == 0 );
- {
- // remove from the system catalog
- BSONObjBuilder b;
- b.append("name", nsToDrop.c_str());
- BSONObj cond = b.done(); // { name: "colltodropname" }
- string system_namespaces = database->name + ".system.namespaces";
- int n = deleteObjects(system_namespaces.c_str(), cond, false, true);
- wassert( n == 1 );
- }
- // remove from the catalog hashtable
- database->namespaceIndex.kill(nsToDrop.c_str());
+ assert( strstr(nsToDrop.c_str(), ".system.") == 0 );
+ {
+ // remove from the system catalog
+ BSONObjBuilder b;
+ b.append("name", nsToDrop.c_str());
+ BSONObj cond = b.done(); // { name: "colltodropname" }
+ string system_namespaces = database->name + ".system.namespaces";
+ int n = deleteObjects(system_namespaces.c_str(), cond, false, true);
+ wassert( n == 1 );
+ }
+ // remove from the catalog hashtable
+ database->namespaceIndex.kill(nsToDrop.c_str());
}
/* delete this index. does NOT clean up the system catalog
(system.indexes or system.namespaces) -- only NamespaceIndex.
*/
-void IndexDetails::kill() {
- string ns = indexNamespace(); // e.g. foo.coll.$ts_1
-
- {
- // clean up in system.indexes
- BSONObjBuilder b;
- b.append("name", indexName().c_str());
- b.append("ns", parentNS().c_str());
- BSONObj cond = b.done(); // e.g.: { name: "ts_1", ns: "foo.coll" }
- string system_indexes = database->name + ".system.indexes";
- int n = deleteObjects(system_indexes.c_str(), cond, false, true);
- wassert( n == 1 );
- }
+void IndexDetails::kill() {
+ string ns = indexNamespace(); // e.g. foo.coll.$ts_1
+
+ {
+ // clean up in system.indexes
+ BSONObjBuilder b;
+ b.append("name", indexName().c_str());
+ b.append("ns", parentNS().c_str());
+ BSONObj cond = b.done(); // e.g.: { name: "ts_1", ns: "foo.coll" }
+ string system_indexes = database->name + ".system.indexes";
+ int n = deleteObjects(system_indexes.c_str(), cond, false, true);
+ wassert( n == 1 );
+ }
- dropNS(ns);
- // database->namespaceIndex.kill(ns.c_str());
- head.setInvalid();
- info.setInvalid();
+ dropNS(ns);
+ // database->namespaceIndex.kill(ns.c_str());
+ head.setInvalid();
+ info.setInvalid();
}
/* Pull out the relevant key objects from obj, so we
- can index them. Note that the set is multiple elements
+ can index them. Note that the set is multiple elements
only when it's a "multikey" array.
Keys will be left empty if key not found in the object.
*/
void IndexDetails::getKeysFromObject( const BSONObj& obj, set<BSONObj>& keys) const {
BSONObj keyPattern = info.obj().getObjectField("key"); // e.g., keyPattern == { ts : 1 }
- if( keyPattern.objsize() == 0 ) {
- cout << keyPattern.toString() << endl;
- cout << info.obj().toString() << endl;
- assert(false);
- }
- BSONObjBuilder b;
- const char *nameWithinArray;
- BSONObj key = obj.extractFieldsDotted(keyPattern, b, nameWithinArray);
- if( key.isEmpty() )
- return;
- BSONObjIterator keyIter( key );
- BSONElement arrayElt;
- int arrayPos = -1;
- for( int i = 0; keyIter.more(); ++i ) {
- BSONElement e = keyIter.next();
- if( e.eoo() )
- break;
- if( e.type() == Array ) {
- uassert( "Index cannot be created on parallel arrays.",
- arrayPos == -1 );
- arrayPos = i;
- arrayElt = e;
- }
- }
- if( arrayPos == -1 ) {
- assert( strlen( nameWithinArray ) == 0 );
- BSONObjBuilder b;
- BSONObjIterator keyIter( key );
- while( keyIter.more() ) {
- BSONElement f = keyIter.next();
- if ( f.eoo() )
- break;
- b.append( f );
- }
- BSONObj o = b.doneAndDecouple();
- assert( !o.isEmpty() );
- keys.insert(o);
- return;
- }
- BSONObj arr = arrayElt.embeddedObject();
- BSONObjIterator arrIter(arr);
- while( arrIter.more() ) {
- BSONElement e = arrIter.next();
- if( e.eoo() )
- break;
-
- if ( strlen( nameWithinArray ) != 0 ) {
- e = e.embeddedObject().getFieldDotted( nameWithinArray );
- if( e.eoo() )
- continue;
- }
- BSONObjBuilder b;
- BSONObjIterator keyIter( key );
- for( int i = 0; keyIter.more(); ++i ) {
- BSONElement f = keyIter.next();
- if ( f.eoo() )
- break;
- if ( i != arrayPos )
- b.append( f );
- else
- b.appendAs( e, "" );
- }
-
- BSONObj o = b.doneAndDecouple();
- assert( !o.isEmpty() );
- keys.insert(o);
- }
+ if ( keyPattern.objsize() == 0 ) {
+ cout << keyPattern.toString() << endl;
+ cout << info.obj().toString() << endl;
+ assert(false);
+ }
+ BSONObjBuilder b;
+ const char *nameWithinArray;
+ BSONObj key = obj.extractFieldsDotted(keyPattern, b, nameWithinArray);
+ if ( key.isEmpty() )
+ return;
+ BSONObjIterator keyIter( key );
+ BSONElement arrayElt;
+ int arrayPos = -1;
+ for ( int i = 0; keyIter.more(); ++i ) {
+ BSONElement e = keyIter.next();
+ if ( e.eoo() )
+ break;
+ if ( e.type() == Array ) {
+ uassert( "Index cannot be created on parallel arrays.",
+ arrayPos == -1 );
+ arrayPos = i;
+ arrayElt = e;
+ }
+ }
+ if ( arrayPos == -1 ) {
+ assert( strlen( nameWithinArray ) == 0 );
+ BSONObjBuilder b;
+ BSONObjIterator keyIter( key );
+ while ( keyIter.more() ) {
+ BSONElement f = keyIter.next();
+ if ( f.eoo() )
+ break;
+ b.append( f );
+ }
+ BSONObj o = b.doneAndDecouple();
+ assert( !o.isEmpty() );
+ keys.insert(o);
+ return;
+ }
+ BSONObj arr = arrayElt.embeddedObject();
+ BSONObjIterator arrIter(arr);
+ while ( arrIter.more() ) {
+ BSONElement e = arrIter.next();
+ if ( e.eoo() )
+ break;
+
+ if ( strlen( nameWithinArray ) != 0 ) {
+ e = e.embeddedObject().getFieldDotted( nameWithinArray );
+ if ( e.eoo() )
+ continue;
+ }
+ BSONObjBuilder b;
+ BSONObjIterator keyIter( key );
+ for ( int i = 0; keyIter.more(); ++i ) {
+ BSONElement f = keyIter.next();
+ if ( f.eoo() )
+ break;
+ if ( i != arrayPos )
+ b.append( f );
+ else
+ b.appendAs( e, "" );
+ }
+
+ BSONObj o = b.doneAndDecouple();
+ assert( !o.isEmpty() );
+ keys.insert(o);
+ }
}
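A minimal usage sketch of the multikey behaviour described in the comment above getKeysFromObject(); it assumes db/jsobj.h and db/json.h are on the include path and that idx is an IndexDetails whose key pattern names the array field (exact key layout elided):
    // illustrative only: one generated key per array element
    set<BSONObj> keys;
    BSONObj doc = fromjson("{ name: \"x\", tags: [ \"a\", \"b\", \"c\" ] }");
    idx.getKeysFromObject(doc, keys);
    // keys.size() == 3 here; a scalar "tags" value would produce a single key,
    // and a missing "tags" field leaves keys empty.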
int nUnindexes = 0;
-void _unindexRecord(const char *ns, IndexDetails& id, BSONObj& obj, const DiskLoc& dl) {
- set<BSONObj> keys;
- id.getKeysFromObject(obj, keys);
- for( set<BSONObj>::iterator i=keys.begin(); i != keys.end(); i++ ) {
- BSONObj j = *i;
+void _unindexRecord(const char *ns, IndexDetails& id, BSONObj& obj, const DiskLoc& dl) {
+ set<BSONObj> keys;
+ id.getKeysFromObject(obj, keys);
+ for ( set<BSONObj>::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ BSONObj j = *i;
// cout << "UNINDEX: j:" << j.toString() << " head:" << id.head.toString() << dl.toString() << endl;
- if( otherTraceLevel >= 5 ) {
- cout << "_unindexRecord() " << obj.toString();
- cout << "\n unindex:" << j.toString() << endl;
- }
- nUnindexes++;
- bool ok = false;
- try {
- ok = id.head.btree()->unindex(id.head, id, j, dl);
- }
- catch(AssertionException&) {
- problem() << "Assertion failure: _unindex failed " << id.indexNamespace() << endl;
- cout << "Assertion failure: _unindex failed" << '\n';
- cout << " obj:" << obj.toString() << '\n';
- cout << " key:" << j.toString() << '\n';
- cout << " dl:" << dl.toString() << endl;
- sayDbContext();
- }
-
- if( !ok ) {
- cout << "unindex failed (key too big?) " << id.indexNamespace() << '\n';
- }
- }
+ if ( otherTraceLevel >= 5 ) {
+ cout << "_unindexRecord() " << obj.toString();
+ cout << "\n unindex:" << j.toString() << endl;
+ }
+ nUnindexes++;
+ bool ok = false;
+ try {
+ ok = id.head.btree()->unindex(id.head, id, j, dl);
+ }
+ catch (AssertionException&) {
+ problem() << "Assertion failure: _unindex failed " << id.indexNamespace() << endl;
+ cout << "Assertion failure: _unindex failed" << '\n';
+ cout << " obj:" << obj.toString() << '\n';
+ cout << " key:" << j.toString() << '\n';
+ cout << " dl:" << dl.toString() << endl;
+ sayDbContext();
+ }
+
+ if ( !ok ) {
+ cout << "unindex failed (key too big?) " << id.indexNamespace() << '\n';
+ }
+ }
}
/* unindex all keys in all indexes for this record. */
void unindexRecord(const char *ns, NamespaceDetails *d, Record *todelete, const DiskLoc& dl) {
- if( d->nIndexes == 0 ) return;
- BSONObj obj(todelete);
- for( int i = 0; i < d->nIndexes; i++ ) {
- _unindexRecord(ns, d->indexes[i], obj, dl);
- }
+ if ( d->nIndexes == 0 ) return;
+ BSONObj obj(todelete);
+ for ( int i = 0; i < d->nIndexes; i++ ) {
+ _unindexRecord(ns, d->indexes[i], obj, dl);
+ }
}
-void DataFileMgr::deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK)
+void DataFileMgr::deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK)
{
- dassert( todelete == dl.rec() );
+ dassert( todelete == dl.rec() );
- NamespaceDetails* d = nsdetails(ns);
- if( d->capped && !cappedOK ) {
- cout << "failing remove on a capped ns " << ns << endl;
- return;
- }
+ NamespaceDetails* d = nsdetails(ns);
+ if ( d->capped && !cappedOK ) {
+ cout << "failing remove on a capped ns " << ns << endl;
+ return;
+ }
- /* check if any cursors point to us. if so, advance them. */
- aboutToDelete(dl);
+ /* check if any cursors point to us. if so, advance them. */
+ aboutToDelete(dl);
- unindexRecord(ns, d, todelete, dl);
+ unindexRecord(ns, d, todelete, dl);
- /* remove ourself from the record next/prev chain */
- {
- if( todelete->prevOfs != DiskLoc::NullOfs )
- todelete->getPrev(dl).rec()->nextOfs = todelete->nextOfs;
- if( todelete->nextOfs != DiskLoc::NullOfs )
- todelete->getNext(dl).rec()->prevOfs = todelete->prevOfs;
- }
+ /* remove ourself from the record next/prev chain */
+ {
+ if ( todelete->prevOfs != DiskLoc::NullOfs )
+ todelete->getPrev(dl).rec()->nextOfs = todelete->nextOfs;
+ if ( todelete->nextOfs != DiskLoc::NullOfs )
+ todelete->getNext(dl).rec()->prevOfs = todelete->prevOfs;
+ }
- /* remove ourself from extent pointers */
- {
- Extent *e = todelete->myExtent(dl);
- if( e->firstRecord == dl ) {
- if( todelete->nextOfs == DiskLoc::NullOfs )
- e->firstRecord.Null();
- else
- e->firstRecord.setOfs(dl.a(), todelete->nextOfs);
- }
- if( e->lastRecord == dl ) {
- if( todelete->prevOfs == DiskLoc::NullOfs )
- e->lastRecord.Null();
- else
- e->lastRecord.setOfs(dl.a(), todelete->prevOfs);
- }
- }
+ /* remove ourself from extent pointers */
+ {
+ Extent *e = todelete->myExtent(dl);
+ if ( e->firstRecord == dl ) {
+ if ( todelete->nextOfs == DiskLoc::NullOfs )
+ e->firstRecord.Null();
+ else
+ e->firstRecord.setOfs(dl.a(), todelete->nextOfs);
+ }
+ if ( e->lastRecord == dl ) {
+ if ( todelete->prevOfs == DiskLoc::NullOfs )
+ e->lastRecord.Null();
+ else
+ e->lastRecord.setOfs(dl.a(), todelete->prevOfs);
+ }
+ }
- /* add to the free list */
- {
- d->nrecords--;
- d->datasize -= todelete->netLength();
- /* temp: if in system.indexes, don't reuse, and zero out: we want to be
+ /* add to the free list */
+ {
+ d->nrecords--;
+ d->datasize -= todelete->netLength();
+ /* temp: if in system.indexes, don't reuse, and zero out: we want to be
careful until validated more, as IndexDetails has pointers
- to this disk location. so an incorrectly done remove would cause
- a lot of problems.
+ to this disk location. so an incorrectly done remove would cause
+ a lot of problems.
*/
- if( strstr(ns, ".system.indexes") ) {
- memset(todelete, 0, todelete->lengthWithHeaders);
- }
- else {
- DEV memset(todelete->data, 0, todelete->netLength()); // attempt to notice invalid reuse.
- d->addDeletedRec((DeletedRecord*)todelete, dl);
- }
- }
+ if ( strstr(ns, ".system.indexes") ) {
+ memset(todelete, 0, todelete->lengthWithHeaders);
+ }
+ else {
+ DEV memset(todelete->data, 0, todelete->netLength()); // attempt to notice invalid reuse.
+ d->addDeletedRec((DeletedRecord*)todelete, dl);
+ }
+ }
}
-void setDifference(set<BSONObj>& l, set<BSONObj>& r, vector<BSONObj*> &diff) {
- set<BSONObj>::iterator i = l.begin();
- set<BSONObj>::iterator j = r.begin();
- while( 1 ) {
- if( i == l.end() )
- break;
- while( j != r.end() && *j < *i )
- j++;
- if( j == r.end() || !i->woEqual(*j) ) {
- const BSONObj *jo = &*i;
- diff.push_back( (BSONObj *) jo );
- }
- i++;
- }
+void setDifference(set<BSONObj>& l, set<BSONObj>& r, vector<BSONObj*> &diff) {
+ set<BSONObj>::iterator i = l.begin();
+ set<BSONObj>::iterator j = r.begin();
+ while ( 1 ) {
+ if ( i == l.end() )
+ break;
+ while ( j != r.end() && *j < *i )
+ j++;
+ if ( j == r.end() || !i->woEqual(*j) ) {
+ const BSONObj *jo = &*i;
+ diff.push_back( (BSONObj *) jo );
+ }
+ i++;
+ }
}
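setDifference() is the small workhorse behind index maintenance on update; a sketch of the two calls exactly as DataFileMgr::update() below uses them (oldkeys/newkeys assumed already filled by getKeysFromObject):
    vector<BSONObj*> removed, added;
    setDifference(oldkeys, newkeys, removed);   // keys only in the old doc -> btree unindex
    setDifference(newkeys, oldkeys, added);     // keys only in the new doc -> btree insert
    // note the result holds pointers into the first argument, so oldkeys/newkeys
    // must stay alive while removed/added are in use.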
/** Note: as written so far, if the object shrinks a lot, we don't free up space. */
void DataFileMgr::update(
- const char *ns,
- Record *toupdate, const DiskLoc& dl,
- const char *buf, int len, stringstream& ss)
+ const char *ns,
+ Record *toupdate, const DiskLoc& dl,
+ const char *buf, int len, stringstream& ss)
{
- dassert( toupdate == dl.rec() );
+ dassert( toupdate == dl.rec() );
- NamespaceDetails *d = nsdetails(ns);
+ NamespaceDetails *d = nsdetails(ns);
- if( toupdate->netLength() < len ) {
- // doesn't fit. must reallocate.
+ if ( toupdate->netLength() < len ) {
+ // doesn't fit. must reallocate.
- if( d && d->capped ) {
- ss << " failing a growing update on a capped ns " << ns << endl;
- return;
- }
+ if ( d && d->capped ) {
+ ss << " failing a growing update on a capped ns " << ns << endl;
+ return;
+ }
- d->paddingTooSmall();
- if( database->profile )
- ss << " moved ";
- deleteRecord(ns, toupdate, dl);
- insert(ns, buf, len);
- return;
- }
+ d->paddingTooSmall();
+ if ( database->profile )
+ ss << " moved ";
+ deleteRecord(ns, toupdate, dl);
+ insert(ns, buf, len);
+ return;
+ }
- d->paddingFits();
-
- /* has any index keys changed? */
- {
- NamespaceDetails *d = nsdetails(ns);
- if( d->nIndexes ) {
- BSONObj newObj(buf);
- BSONObj oldObj = dl.obj();
- for( int i = 0; i < d->nIndexes; i++ ) {
- IndexDetails& idx = d->indexes[i];
- BSONObj idxKey = idx.info.obj().getObjectField("key");
-
- set<BSONObj> oldkeys;
- set<BSONObj> newkeys;
- idx.getKeysFromObject(oldObj, oldkeys);
- idx.getKeysFromObject(newObj, newkeys);
- vector<BSONObj*> removed;
- setDifference(oldkeys, newkeys, removed);
- string idxns = idx.indexNamespace();
- for( unsigned i = 0; i < removed.size(); i++ ) {
- try {
- idx.head.btree()->unindex(idx.head, idx, *removed[i], dl);
- }
- catch(AssertionException&) {
- ss << " exception update unindex ";
- problem() << " caught assertion update unindex " << idxns.c_str() << endl;
- }
- }
- vector<BSONObj*> added;
- setDifference(newkeys, oldkeys, added);
- assert( !dl.isNull() );
- for( unsigned i = 0; i < added.size(); i++ ) {
- try {
- idx.head.btree()->insert(
- idx.head,
- dl, *added[i], false, idx, true);
- }
- catch(AssertionException&) {
- ss << " exception update index ";
- cout << " caught assertion update index " << idxns.c_str() << '\n';
- problem() << " caught assertion update index " << idxns.c_str() << endl;
- }
- }
- if( database->profile )
- ss << "<br>" << added.size() << " key updates ";
-
- }
- }
- }
+ d->paddingFits();
- // update in place
- memcpy(toupdate->data, buf, len);
+ /* has any index keys changed? */
+ {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d->nIndexes ) {
+ BSONObj newObj(buf);
+ BSONObj oldObj = dl.obj();
+ for ( int i = 0; i < d->nIndexes; i++ ) {
+ IndexDetails& idx = d->indexes[i];
+ BSONObj idxKey = idx.info.obj().getObjectField("key");
+
+ set<BSONObj> oldkeys;
+ set<BSONObj> newkeys;
+ idx.getKeysFromObject(oldObj, oldkeys);
+ idx.getKeysFromObject(newObj, newkeys);
+ vector<BSONObj*> removed;
+ setDifference(oldkeys, newkeys, removed);
+ string idxns = idx.indexNamespace();
+ for ( unsigned i = 0; i < removed.size(); i++ ) {
+ try {
+ idx.head.btree()->unindex(idx.head, idx, *removed[i], dl);
+ }
+ catch (AssertionException&) {
+ ss << " exception update unindex ";
+ problem() << " caught assertion update unindex " << idxns.c_str() << endl;
+ }
+ }
+ vector<BSONObj*> added;
+ setDifference(newkeys, oldkeys, added);
+ assert( !dl.isNull() );
+ for ( unsigned i = 0; i < added.size(); i++ ) {
+ try {
+ idx.head.btree()->insert(
+ idx.head,
+ dl, *added[i], false, idx, true);
+ }
+ catch (AssertionException&) {
+ ss << " exception update index ";
+ cout << " caught assertion update index " << idxns.c_str() << '\n';
+ problem() << " caught assertion update index " << idxns.c_str() << endl;
+ }
+ }
+ if ( database->profile )
+ ss << "<br>" << added.size() << " key updates ";
+
+ }
+ }
+ }
+
+ // update in place
+ memcpy(toupdate->data, buf, len);
}
int followupExtentSize(int len, int lastExtentLen) {
- int x = initialExtentSize(len);
- int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
- int sz = y > x ? y : x;
- sz = ((int)sz) & 0xffffff00;
- assert( sz > len );
- return sz;
+ int x = initialExtentSize(len);
+ int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
+ int sz = y > x ? y : x;
+ sz = ((int)sz) & 0xffffff00;
+ assert( sz > len );
+ return sz;
}
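A standalone sketch of the growth rule above, with initialExtentSize() replaced by an assumed baseline parameter so the arithmetic can be followed in isolation:
    #include <cassert>
    // not the real function: baseline is passed in instead of calling initialExtentSize(len)
    int followupExtentSizeSketch(int len, int lastExtentLen, int assumedBaseline) {
        int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
        int sz = y > assumedBaseline ? y : assumedBaseline;
        sz &= 0xffffff00;                        // round down to a 256 byte boundary
        assert( sz > len );
        return sz;                               // e.g. (500, 100000, 8192) -> 399872
    }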
int deb=0;
/* add keys to indexes for a new record */
-void _indexRecord(IndexDetails& idx, BSONObj& obj, DiskLoc newRecordLoc) {
-
- set<BSONObj> keys;
- idx.getKeysFromObject(obj, keys);
- for( set<BSONObj>::iterator i=keys.begin(); i != keys.end(); i++ ) {
- assert( !newRecordLoc.isNull() );
- try {
- idx.head.btree()->insert(idx.head, newRecordLoc,
- (BSONObj&) *i, false, idx, true);
- }
- catch(AssertionException&) {
- problem() << " caught assertion _indexRecord " << idx.indexNamespace() << endl;
- }
- }
+void _indexRecord(IndexDetails& idx, BSONObj& obj, DiskLoc newRecordLoc) {
+
+ set<BSONObj> keys;
+ idx.getKeysFromObject(obj, keys);
+ for ( set<BSONObj>::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ assert( !newRecordLoc.isNull() );
+ try {
+ idx.head.btree()->insert(idx.head, newRecordLoc,
+ (BSONObj&) *i, false, idx, true);
+ }
+ catch (AssertionException&) {
+ problem() << " caught assertion _indexRecord " << idx.indexNamespace() << endl;
+ }
+ }
}
-/* note there are faster ways to build an index in bulk, that can be
+/* note there are faster ways to build an index in bulk, that can be
done eventually */
void addExistingToIndex(const char *ns, IndexDetails& idx) {
- log() << "Adding all existing records for " << ns << " to new index" << endl;
- int n = 0;
- auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
- while( c->ok() ) {
- BSONObj js = c->current();
- _indexRecord(idx, js, c->currLoc());
- c->advance();
- n++;
- };
- log() << " indexing complete for " << n << " records" << endl;
+ log() << "Adding all existing records for " << ns << " to new index" << endl;
+ int n = 0;
+ auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
+ while ( c->ok() ) {
+ BSONObj js = c->current();
+ _indexRecord(idx, js, c->currLoc());
+ c->advance();
+ n++;
+ };
+ log() << " indexing complete for " << n << " records" << endl;
}
/* add keys to indexes for a new record */
-void indexRecord(NamespaceDetails *d, const void *buf, int len, DiskLoc newRecordLoc) {
- BSONObj obj((const char *)buf);
- for( int i = 0; i < d->nIndexes; i++ ) {
- _indexRecord(d->indexes[i], obj, newRecordLoc);
- }
+void indexRecord(NamespaceDetails *d, const void *buf, int len, DiskLoc newRecordLoc) {
+ BSONObj obj((const char *)buf);
+ for ( int i = 0; i < d->nIndexes; i++ ) {
+ _indexRecord(d->indexes[i], obj, newRecordLoc);
+ }
}
extern BSONObj emptyObj;
extern BSONObj id_obj; // = fromjson("{_id:ObjId()}");
-void ensureHaveIdIndex(const char *ns) {
- NamespaceDetails *d = nsdetails(ns);
- if( d == 0 || (d->flags & NamespaceDetails::Flag_HaveIdIndex) )
- return;
+void ensureHaveIdIndex(const char *ns) {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 || (d->flags & NamespaceDetails::Flag_HaveIdIndex) )
+ return;
- d->flags |= NamespaceDetails::Flag_HaveIdIndex;
+ d->flags |= NamespaceDetails::Flag_HaveIdIndex;
- string system_indexes = database->name + ".system.indexes";
+ string system_indexes = database->name + ".system.indexes";
- BSONObjBuilder b;
- b.append("name", "_id_");
- b.append("ns", ns);
- b.append("key", id_obj);
- BSONObj o = b.done();
+ BSONObjBuilder b;
+ b.append("name", "_id_");
+ b.append("ns", ns);
+ b.append("key", id_obj);
+ BSONObj o = b.done();
- /* edge case: note the insert could fail if we have hit maxindexes already */
- theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize());
+ /* edge case: note the insert could fail if we have hit maxindexes already */
+ theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize());
}
DiskLoc DataFileMgr::insert(const char *ns, const void *buf, int len, bool god) {
- bool addIndex = false;
- const char *sys = strstr(ns, "system.");
- if( sys ) {
- if( sys == ns ) {
- cout << "ERROR: attempt to insert for invalid database 'system': " << ns << endl;
- return DiskLoc();
- }
- if( strstr(ns, ".system.") ) {
- if( strstr(ns, ".system.indexes") )
+ bool addIndex = false;
+ const char *sys = strstr(ns, "system.");
+ if ( sys ) {
+ if ( sys == ns ) {
+ cout << "ERROR: attempt to insert for invalid database 'system': " << ns << endl;
+ return DiskLoc();
+ }
+ if ( strstr(ns, ".system.") ) {
+ if ( strstr(ns, ".system.indexes") )
addIndex = true;
- else if( !god ) {
- cout << "ERROR: attempt to insert in system namespace " << ns << endl;
- return DiskLoc();
- }
- }
- }
+ else if ( !god ) {
+ cout << "ERROR: attempt to insert in system namespace " << ns << endl;
+ return DiskLoc();
+ }
+ }
+ }
- NamespaceDetails *d = nsdetails(ns);
- if( d == 0 ) {
- addNewNamespaceToCatalog(ns);
- /* todo: shouldn't be in the namespace catalog until after the allocations here work.
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 ) {
+ addNewNamespaceToCatalog(ns);
+ /* todo: shouldn't be in the namespace catalog until after the allocations here work.
also if this is an addIndex, those checks should happen before this!
- */
- database->newestFile()->newExtent(ns, initialExtentSize(len));
- d = nsdetails(ns);
- }
- d->paddingFits();
+ */
+ database->newestFile()->newExtent(ns, initialExtentSize(len));
+ d = nsdetails(ns);
+ }
+ d->paddingFits();
- NamespaceDetails *tableToIndex = 0;
+ NamespaceDetails *tableToIndex = 0;
string tabletoidxns;
- if( addIndex ) {
- BSONObj io((const char *) buf);
- const char *name = io.getStringField("name"); // name of the index
- tabletoidxns = io.getStringField("ns"); // table it indexes
+ if ( addIndex ) {
+ BSONObj io((const char *) buf);
+ const char *name = io.getStringField("name"); // name of the index
+ tabletoidxns = io.getStringField("ns"); // table it indexes
- if( database->name != nsToClient(tabletoidxns.c_str()) ) {
+ if ( database->name != nsToClient(tabletoidxns.c_str()) ) {
uassert("bad table to index name on add index attempt", false);
return DiskLoc();
}
- BSONObj key = io.getObjectField("key");
- if( *name == 0 || tabletoidxns.empty() || key.isEmpty() || key.objsize() > 2048 ) {
- cout << "user warning: bad add index attempt name:" << (name?name:"") << "\n ns:" <<
- tabletoidxns << "\n ourns:" << ns;
- cout << "\n idxobj:" << io.toString() << endl;
- return DiskLoc();
- }
- tableToIndex = nsdetails(tabletoidxns.c_str());
- if( tableToIndex == 0 ) {
- // try to create it
- string err;
- if( !userCreateNS(tabletoidxns.c_str(), emptyObj, err, false) ) {
- problem() << "ERROR: failed to create collection while adding its index. " << tabletoidxns << endl;
- return DiskLoc();
- }
- tableToIndex = nsdetails(tabletoidxns.c_str());
- log() << "info: creating collection " << tabletoidxns << " on add index\n";
- assert( tableToIndex );
- }
- if( tableToIndex->nIndexes >= MaxIndexes ) {
- log() << "user warning: bad add index attempt, too many indexes for:" << tabletoidxns << endl;
- return DiskLoc();
- }
- if( tableToIndex->findIndexByName(name) >= 0 ) {
- //cout << "INFO: index:" << name << " already exists for:" << tabletoidxns << endl;
- return DiskLoc();
- }
- //indexFullNS = tabletoidxns;
- //indexFullNS += ".$";
- //indexFullNS += name; // database.table.$index -- note this doesn't contain jsobjs, it contains BtreeBuckets.
- }
+ BSONObj key = io.getObjectField("key");
+ if ( *name == 0 || tabletoidxns.empty() || key.isEmpty() || key.objsize() > 2048 ) {
+ cout << "user warning: bad add index attempt name:" << (name?name:"") << "\n ns:" <<
+ tabletoidxns << "\n ourns:" << ns;
+ cout << "\n idxobj:" << io.toString() << endl;
+ return DiskLoc();
+ }
+ tableToIndex = nsdetails(tabletoidxns.c_str());
+ if ( tableToIndex == 0 ) {
+ // try to create it
+ string err;
+ if ( !userCreateNS(tabletoidxns.c_str(), emptyObj, err, false) ) {
+ problem() << "ERROR: failed to create collection while adding its index. " << tabletoidxns << endl;
+ return DiskLoc();
+ }
+ tableToIndex = nsdetails(tabletoidxns.c_str());
+ log() << "info: creating collection " << tabletoidxns << " on add index\n";
+ assert( tableToIndex );
+ }
+ if ( tableToIndex->nIndexes >= MaxIndexes ) {
+ log() << "user warning: bad add index attempt, too many indexes for:" << tabletoidxns << endl;
+ return DiskLoc();
+ }
+ if ( tableToIndex->findIndexByName(name) >= 0 ) {
+ //cout << "INFO: index:" << name << " already exists for:" << tabletoidxns << endl;
+ return DiskLoc();
+ }
+ //indexFullNS = tabletoidxns;
+ //indexFullNS += ".$";
+ //indexFullNS += name; // database.table.$index -- note this doesn't contain jsobjs, it contains BtreeBuckets.
+ }
- DiskLoc extentLoc;
- int lenWHdr = len + Record::HeaderSize;
- lenWHdr = (int) (lenWHdr * d->paddingFactor);
- if( lenWHdr == 0 ) {
- // old datafiles, backward compatible here.
- assert( d->paddingFactor == 0 );
- d->paddingFactor = 1.0;
- lenWHdr = len + Record::HeaderSize;
- }
- DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
- if( loc.isNull() ) {
- // out of space
- if( d->capped == 0 ) { // size capped doesn't grow
- DEV log() << "allocating new extent for " << ns << " padding:" << d->paddingFactor << endl;
- database->newestFile()->newExtent(ns, followupExtentSize(len, d->lastExtentSize));
- loc = d->alloc(ns, lenWHdr, extentLoc);
- }
- if( loc.isNull() ) {
- log() << "out of space in datafile " << ns << " capped:" << d->capped << endl;
- assert(d->capped);
- return DiskLoc();
- }
- }
+ DiskLoc extentLoc;
+ int lenWHdr = len + Record::HeaderSize;
+ lenWHdr = (int) (lenWHdr * d->paddingFactor);
+ if ( lenWHdr == 0 ) {
+ // old datafiles, backward compatible here.
+ assert( d->paddingFactor == 0 );
+ d->paddingFactor = 1.0;
+ lenWHdr = len + Record::HeaderSize;
+ }
+ DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( loc.isNull() ) {
+ // out of space
+ if ( d->capped == 0 ) { // size capped doesn't grow
+ DEV log() << "allocating new extent for " << ns << " padding:" << d->paddingFactor << endl;
+ database->newestFile()->newExtent(ns, followupExtentSize(len, d->lastExtentSize));
+ loc = d->alloc(ns, lenWHdr, extentLoc);
+ }
+ if ( loc.isNull() ) {
+ log() << "out of space in datafile " << ns << " capped:" << d->capped << endl;
+ assert(d->capped);
+ return DiskLoc();
+ }
+ }
- Record *r = loc.rec();
- assert( r->lengthWithHeaders >= lenWHdr );
- memcpy(r->data, buf, len);
- Extent *e = r->myExtent(loc);
- if( e->lastRecord.isNull() ) {
- e->firstRecord = e->lastRecord = loc;
- r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
- }
- else {
+ Record *r = loc.rec();
+ assert( r->lengthWithHeaders >= lenWHdr );
+ memcpy(r->data, buf, len);
+ Extent *e = r->myExtent(loc);
+ if ( e->lastRecord.isNull() ) {
+ e->firstRecord = e->lastRecord = loc;
+ r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
+ }
+ else {
- Record *oldlast = e->lastRecord.rec();
- r->prevOfs = e->lastRecord.getOfs();
- r->nextOfs = DiskLoc::NullOfs;
- oldlast->nextOfs = loc.getOfs();
- e->lastRecord = loc;
- }
+ Record *oldlast = e->lastRecord.rec();
+ r->prevOfs = e->lastRecord.getOfs();
+ r->nextOfs = DiskLoc::NullOfs;
+ oldlast->nextOfs = loc.getOfs();
+ e->lastRecord = loc;
+ }
- d->nrecords++;
- d->datasize += r->netLength();
+ d->nrecords++;
+ d->datasize += r->netLength();
- if( tableToIndex ) {
- IndexDetails& idxinfo = tableToIndex->indexes[tableToIndex->nIndexes];
- idxinfo.info = loc;
- idxinfo.head = BtreeBucket::addHead(idxinfo);
- tableToIndex->addingIndex(tabletoidxns.c_str(), idxinfo);
- /* todo: index existing records here */
- addExistingToIndex(tabletoidxns.c_str(), idxinfo);
- }
+ if ( tableToIndex ) {
+ IndexDetails& idxinfo = tableToIndex->indexes[tableToIndex->nIndexes];
+ idxinfo.info = loc;
+ idxinfo.head = BtreeBucket::addHead(idxinfo);
+ tableToIndex->addingIndex(tabletoidxns.c_str(), idxinfo);
+ /* todo: index existing records here */
+ addExistingToIndex(tabletoidxns.c_str(), idxinfo);
+ }
- /* add this record to our indexes */
- if( d->nIndexes )
- indexRecord(d, buf, len, loc);
+ /* add this record to our indexes */
+ if ( d->nIndexes )
+ indexRecord(d, buf, len, loc);
// cout << " inserted at loc:" << hex << loc.getOfs() << " lenwhdr:" << hex << lenWHdr << dec << ' ' << ns << endl;
- return loc;
+ return loc;
}
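The record sizing at the top of insert() is easy to misread; a minimal sketch of just that arithmetic (Record::HeaderSize and the padding factor are assumed values here, not taken from a live namespace):
    // illustrative only: mirrors the lenWHdr computation in DataFileMgr::insert()
    int paddedLenSketch(int len, double paddingFactor) {
        const int HeaderSize = 16;                     // Record::HeaderSize
        int lenWHdr = (int) ((len + HeaderSize) * paddingFactor);
        if ( lenWHdr == 0 )                            // legacy files stored paddingFactor == 0
            lenWHdr = len + HeaderSize;
        return lenWHdr;                                // e.g. paddedLenSketch(484, 1.5) == 750
    }
The slack added by paddingFactor is what lets later growing updates stay in place instead of forcing a delete-and-reinsert move.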
/* special version of insert for transaction logging -- streamlined a bit.
assumes ns is capped and no indexes
*/
Record* DataFileMgr::fast_oplog_insert(NamespaceDetails *d, const char *ns, int len) {
- RARELY assert( d == nsdetails(ns) );
-
- DiskLoc extentLoc;
- int lenWHdr = len + Record::HeaderSize;
- DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
- if( loc.isNull() ) {
- assert(false);
- return 0;
- }
+ RARELY assert( d == nsdetails(ns) );
+
+ DiskLoc extentLoc;
+ int lenWHdr = len + Record::HeaderSize;
+ DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( loc.isNull() ) {
+ assert(false);
+ return 0;
+ }
- Record *r = loc.rec();
- assert( r->lengthWithHeaders >= lenWHdr );
+ Record *r = loc.rec();
+ assert( r->lengthWithHeaders >= lenWHdr );
- Extent *e = r->myExtent(loc);
- if( e->lastRecord.isNull() ) {
- e->firstRecord = e->lastRecord = loc;
- r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
- }
- else {
- Record *oldlast = e->lastRecord.rec();
- r->prevOfs = e->lastRecord.getOfs();
- r->nextOfs = DiskLoc::NullOfs;
- oldlast->nextOfs = loc.getOfs();
- e->lastRecord = loc;
- }
+ Extent *e = r->myExtent(loc);
+ if ( e->lastRecord.isNull() ) {
+ e->firstRecord = e->lastRecord = loc;
+ r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
+ }
+ else {
+ Record *oldlast = e->lastRecord.rec();
+ r->prevOfs = e->lastRecord.getOfs();
+ r->nextOfs = DiskLoc::NullOfs;
+ oldlast->nextOfs = loc.getOfs();
+ e->lastRecord = loc;
+ }
- d->nrecords++;
+ d->nrecords++;
- return r;
+ return r;
}
void DataFileMgr::init(const char *dir) {
-/* boost::filesystem::path path( dir );
- path /= "temp.dat";
- string pathString = path.string();
- temp.open(pathString.c_str(), 64 * 1024 * 1024);
-*/
+ /* boost::filesystem::path path( dir );
+ path /= "temp.dat";
+ string pathString = path.string();
+ temp.open(pathString.c_str(), 64 * 1024 * 1024);
+ */
}
void pdfileInit() {
// namespaceIndex.init(dbpath);
- theDataFileMgr.init(dbpath);
+ theDataFileMgr.init(dbpath);
}
#include "clientcursor.h"
-void dropDatabase(const char *ns) {
- // ns is of the form "<dbname>.$cmd"
- char cl[256];
- nsToClient(ns, cl);
- problem() << "dropDatabase " << cl << endl;
- assert( database->name == cl );
+void dropDatabase(const char *ns) {
+ // ns is of the form "<dbname>.$cmd"
+ char cl[256];
+ nsToClient(ns, cl);
+ problem() << "dropDatabase " << cl << endl;
+ assert( database->name == cl );
- closeClient( cl );
- _deleteDataFiles(cl);
+ closeClient( cl );
+ _deleteDataFiles(cl);
}
typedef boost::filesystem::path Path;
// back up original database files to 'temp' dir
void _renameForBackup( const char *database, const Path &tmpPath ) {
- class Renamer : public FileOp {
- public:
- Renamer( const Path &tmpPath ) : tmpPath_( tmpPath ) {}
- private:
- const boost::filesystem::path &tmpPath_;
- virtual bool apply( const Path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- boost::filesystem::rename( p, tmpPath_ / ( p.leaf() + ".bak" ) );
- return true;
- }
- virtual const char * op() const { return "renaming"; }
- } renamer( tmpPath );
- _applyOpToDataFiles( database, renamer );
+ class Renamer : public FileOp {
+ public:
+ Renamer( const Path &tmpPath ) : tmpPath_( tmpPath ) {}
+ private:
+ const boost::filesystem::path &tmpPath_;
+ virtual bool apply( const Path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ boost::filesystem::rename( p, tmpPath_ / ( p.leaf() + ".bak" ) );
+ return true;
+ }
+ virtual const char * op() const {
+ return "renaming";
+ }
+ } renamer( tmpPath );
+ _applyOpToDataFiles( database, renamer );
}
// move temp files to standard data dir
void _replaceWithRecovered( const char *database, const char *tmpPathString ) {
- class : public FileOp {
- virtual bool apply( const Path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
+ class : public FileOp {
+ virtual bool apply( const Path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
boost::filesystem::rename( p, boost::filesystem::path(dbpath) / p.leaf() );
- return true;
- }
- virtual const char * op() const { return "renaming"; }
- } renamer;
- _applyOpToDataFiles( database, renamer, tmpPathString );
+ return true;
+ }
+ virtual const char * op() const {
+ return "renaming";
+ }
+ } renamer;
+ _applyOpToDataFiles( database, renamer, tmpPathString );
}
// generate a directory name for storing temp data files
Path uniqueTmpPath() {
- Path dbPath = Path( dbpath );
- Path tmpPath;
- int i = 0;
- bool exists = false;
- do {
- stringstream ss;
- ss << "tmp_repairDatabase_" << i++;
- tmpPath = dbPath / ss.str();
- BOOST_CHECK_EXCEPTION( exists = boost::filesystem::exists( tmpPath ) );
- } while( exists );
- return tmpPath;
+ Path dbPath = Path( dbpath );
+ Path tmpPath;
+ int i = 0;
+ bool exists = false;
+ do {
+ stringstream ss;
+ ss << "tmp_repairDatabase_" << i++;
+ tmpPath = dbPath / ss.str();
+ BOOST_CHECK_EXCEPTION( exists = boost::filesystem::exists( tmpPath ) );
+ } while ( exists );
+ return tmpPath;
}
bool repairDatabase( const char *ns, bool preserveClonedFilesOnFailure,
- bool backupOriginalFiles ) {
- stringstream ss;
- ss << "localhost:" << port;
- string localhost = ss.str();
-
- // ns is of the form "<dbname>.$cmd"
- char dbName[256];
- nsToClient(ns, dbName);
- problem() << "repairDatabase " << dbName << endl;
- assert( database->name == dbName );
-
- Path tmpPath = uniqueTmpPath();
- BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( tmpPath ) );
- string tmpPathString = tmpPath.native_directory_string();
- assert( setClient( dbName, tmpPathString.c_str() ) );
-
- string errmsg;
- bool res = cloneFrom(localhost.c_str(), errmsg, dbName, /*logForReplication=*/false, /*slaveok*/false);
- closeClient( dbName, tmpPathString.c_str() );
-
- if ( !res ) {
- problem() << "clone failed for " << dbName << endl;
- if ( !preserveClonedFilesOnFailure )
- BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( tmpPath ) );
- return false;
- }
+ bool backupOriginalFiles ) {
+ stringstream ss;
+ ss << "localhost:" << port;
+ string localhost = ss.str();
+
+ // ns is of the form "<dbname>.$cmd"
+ char dbName[256];
+ nsToClient(ns, dbName);
+ problem() << "repairDatabase " << dbName << endl;
+ assert( database->name == dbName );
+
+ Path tmpPath = uniqueTmpPath();
+ BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( tmpPath ) );
+ string tmpPathString = tmpPath.native_directory_string();
+ assert( setClient( dbName, tmpPathString.c_str() ) );
+
+ string errmsg;
+ bool res = cloneFrom(localhost.c_str(), errmsg, dbName, /*logForReplication=*/false, /*slaveok*/false);
+ closeClient( dbName, tmpPathString.c_str() );
+
+ if ( !res ) {
+ problem() << "clone failed for " << dbName << endl;
+ if ( !preserveClonedFilesOnFailure )
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( tmpPath ) );
+ return false;
+ }
+
+ assert( !setClientTempNs( dbName ) );
+ closeClient( dbName );
+
+ if ( backupOriginalFiles )
+ _renameForBackup( dbName, tmpPath );
+ else
+ _deleteDataFiles( dbName );
+
+ _replaceWithRecovered( dbName, tmpPathString.c_str() );
+
+ if ( !backupOriginalFiles )
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( tmpPath ) );
- assert( !setClientTempNs( dbName ) );
- closeClient( dbName );
-
- if( backupOriginalFiles )
- _renameForBackup( dbName, tmpPath );
- else
- _deleteDataFiles( dbName );
-
- _replaceWithRecovered( dbName, tmpPathString.c_str() );
-
- if ( !backupOriginalFiles )
- BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( tmpPath ) );
-
- return true;
+ return true;
}
diff --git a/db/pdfile.h b/db/pdfile.h
index 21d3831f64b..384b4f032f2 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -1,15 +1,15 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -19,7 +19,7 @@
Files:
database.ns - namespace index
database.1 - data files
- database.2
+ database.2
...
*/
@@ -45,52 +45,54 @@ bool repairDatabase(const char *ns, bool preserveClonedFilesOnFailure = false, b
void dropNS(string& dropNs);;
bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication);
-/*---------------------------------------------------------------------*/
+/*---------------------------------------------------------------------*/
class PDFHeader;
class PhysicalDataFile {
- friend class DataFileMgr;
- friend class BasicCursor;
+ friend class DataFileMgr;
+ friend class BasicCursor;
public:
- PhysicalDataFile(int fn) : fileNo(fn) { }
- void open(int fileNo, const char *filename);
+ PhysicalDataFile(int fn) : fileNo(fn) { }
+ void open(int fileNo, const char *filename);
- Extent* newExtent(const char *ns, int approxSize, int loops = 0);
- PDFHeader *getHeader() { return header; }
+ Extent* newExtent(const char *ns, int approxSize, int loops = 0);
+ PDFHeader *getHeader() {
+ return header;
+ }
private:
- Extent* getExtent(DiskLoc loc);
- Extent* _getExtent(DiskLoc loc);
- Record* recordAt(DiskLoc dl);
-
- MemoryMappedFile mmf;
- PDFHeader *header;
- int __unUsEd;
- // int length;
- int fileNo;
+ Extent* getExtent(DiskLoc loc);
+ Extent* _getExtent(DiskLoc loc);
+ Record* recordAt(DiskLoc dl);
+
+ MemoryMappedFile mmf;
+ PDFHeader *header;
+ int __unUsEd;
+ // int length;
+ int fileNo;
};
class DataFileMgr {
- friend class BasicCursor;
+ friend class BasicCursor;
public:
- void init(const char *);
+ void init(const char *);
- void update(
- const char *ns,
- Record *toupdate, const DiskLoc& dl,
- const char *buf, int len, stringstream& profiling);
- DiskLoc insert(const char *ns, const void *buf, int len, bool god = false);
- void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false);
- static auto_ptr<Cursor> findAll(const char *ns);
+ void update(
+ const char *ns,
+ Record *toupdate, const DiskLoc& dl,
+ const char *buf, int len, stringstream& profiling);
+ DiskLoc insert(const char *ns, const void *buf, int len, bool god = false);
+ void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false);
+ static auto_ptr<Cursor> findAll(const char *ns);
- /* special version of insert for transaction logging -- streamlined a bit.
+ /* special version of insert for transaction logging -- streamlined a bit.
assumes ns is capped and no indexes
*/
- Record* fast_oplog_insert(NamespaceDetails *d, const char *ns, int len);
+ Record* fast_oplog_insert(NamespaceDetails *d, const char *ns, int len);
- static Extent* getExtent(const DiskLoc& dl);
- static Record* getRecord(const DiskLoc& dl);
+ static Extent* getExtent(const DiskLoc& dl);
+ static Record* getRecord(const DiskLoc& dl);
private:
- vector<PhysicalDataFile *> files;
+ vector<PhysicalDataFile *> files;
};
extern DataFileMgr theDataFileMgr;
@@ -99,9 +101,9 @@ extern DataFileMgr theDataFileMgr;
class DeletedRecord {
public:
- int lengthWithHeaders;
- int extentOfs;
- DiskLoc nextDeleted;
+ int lengthWithHeaders;
+ int extentOfs;
+ DiskLoc nextDeleted;
};
/* Record is a record in a datafile. DeletedRecord is similar but for deleted space.
@@ -113,31 +115,35 @@ public:
(11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
(11:04:33 AM) dm10gen: see class DiskLoc for more info
(11:04:43 AM) dm10gen: so that is how Record::myExtent() works
-(11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must popular its extentOfs then
+(11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must populate its extentOfs then
*/
class Record {
public:
- enum { HeaderSize = 16 };
- int lengthWithHeaders;
- int extentOfs;
- int nextOfs;
- int prevOfs;
- char data[4];
- int netLength() { return lengthWithHeaders - HeaderSize; }
- //void setNewLength(int netlen) { lengthWithHeaders = netlen + HeaderSize; }
-
- /* use this when a record is deleted. basically a union with next/prev fields */
- DeletedRecord& asDeleted() { return *((DeletedRecord*) this); }
-
- Extent* myExtent(const DiskLoc& myLoc) {
- return DataFileMgr::getExtent(DiskLoc(myLoc.a(), extentOfs));
- }
- /* get the next record in the namespace, traversing extents as necessary */
- DiskLoc getNext(const DiskLoc& myLoc);
- DiskLoc getPrev(const DiskLoc& myLoc);
+ enum { HeaderSize = 16 };
+ int lengthWithHeaders;
+ int extentOfs;
+ int nextOfs;
+ int prevOfs;
+ char data[4];
+ int netLength() {
+ return lengthWithHeaders - HeaderSize;
+ }
+ //void setNewLength(int netlen) { lengthWithHeaders = netlen + HeaderSize; }
+
+ /* use this when a record is deleted. basically a union with next/prev fields */
+ DeletedRecord& asDeleted() {
+ return *((DeletedRecord*) this);
+ }
+
+ Extent* myExtent(const DiskLoc& myLoc) {
+ return DataFileMgr::getExtent(DiskLoc(myLoc.a(), extentOfs));
+ }
+ /* get the next record in the namespace, traversing extents as necessary */
+ DiskLoc getNext(const DiskLoc& myLoc);
+ DiskLoc getPrev(const DiskLoc& myLoc);
};
-/* extents are datafile regions where all the records within the region
+/* extents are datafile regions where all the records within the region
belong to the same namespace.
(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
@@ -145,45 +151,51 @@ public:
*/
class Extent {
public:
- unsigned magic;
- DiskLoc myLoc;
- DiskLoc xnext, xprev; /* next/prev extent for this namespace */
- Namespace ns; /* which namespace this extent is for. this is just for troubleshooting really */
- int length; /* size of the extent, including these fields */
- DiskLoc firstRecord, lastRecord;
- char extentData[4];
-
- bool validates() {
- return !(firstRecord.isNull() ^ lastRecord.isNull()) &&
- length >= 0 && !myLoc.isNull();
- }
-
- void dump(iostream& s) {
- s << " loc:" << myLoc.toString() << " xnext:" << xnext.toString() << " xprev:" << xprev.toString() << '\n';
- s << " ns:" << ns.buf << '\n';
- s << " size:" << length << " firstRecord:" << firstRecord.toString() << " lastRecord:" << lastRecord.toString() << '\n';
- }
-
- /* assumes already zeroed -- insufficient for block 'reuse' perhaps
- Returns a DeletedRecord location which is the data in the extent ready for us.
- Caller will need to add that to the freelist structure in namespacedetail.
- */
- DiskLoc init(const char *nsname, int _length, int _fileNo, int _offset);
-
- void assertOk() { assert(magic == 0x41424344); }
-
- Record* newRecord(int len);
-
- Record* getRecord(DiskLoc dl) {
- assert( !dl.isNull() );
- assert( dl.sameFile(myLoc) );
- int x = dl.getOfs() - myLoc.getOfs();
- assert( x > 0 );
- return (Record *) (((char *) this) + x);
- }
-
- Extent* getNextExtent() { return xnext.isNull() ? 0 : DataFileMgr::getExtent(xnext); }
- Extent* getPrevExtent() { return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev); }
+ unsigned magic;
+ DiskLoc myLoc;
+ DiskLoc xnext, xprev; /* next/prev extent for this namespace */
+ Namespace ns; /* which namespace this extent is for. this is just for troubleshooting really */
+ int length; /* size of the extent, including these fields */
+ DiskLoc firstRecord, lastRecord;
+ char extentData[4];
+
+ bool validates() {
+ return !(firstRecord.isNull() ^ lastRecord.isNull()) &&
+ length >= 0 && !myLoc.isNull();
+ }
+
+ void dump(iostream& s) {
+ s << " loc:" << myLoc.toString() << " xnext:" << xnext.toString() << " xprev:" << xprev.toString() << '\n';
+ s << " ns:" << ns.buf << '\n';
+ s << " size:" << length << " firstRecord:" << firstRecord.toString() << " lastRecord:" << lastRecord.toString() << '\n';
+ }
+
+ /* assumes already zeroed -- insufficient for block 'reuse' perhaps
+ Returns a DeletedRecord location which is the data in the extent ready for us.
+ Caller will need to add that to the freelist structure in namespacedetail.
+ */
+ DiskLoc init(const char *nsname, int _length, int _fileNo, int _offset);
+
+ void assertOk() {
+ assert(magic == 0x41424344);
+ }
+
+ Record* newRecord(int len);
+
+ Record* getRecord(DiskLoc dl) {
+ assert( !dl.isNull() );
+ assert( dl.sameFile(myLoc) );
+ int x = dl.getOfs() - myLoc.getOfs();
+ assert( x > 0 );
+ return (Record *) (((char *) this) + x);
+ }
+
+ Extent* getNextExtent() {
+ return xnext.isNull() ? 0 : DataFileMgr::getExtent(xnext);
+ }
+ Extent* getPrevExtent() {
+ return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev);
+ }
};
/*
@@ -202,117 +214,121 @@ public:
/* data file header */
class PDFHeader {
public:
- int version;
- int versionMinor;
- int fileLength;
- DiskLoc unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
- int unusedLength;
- char reserved[8192 - 4*4 - 8];
-
- char data[4];
-
- static int headerSize() { return sizeof(PDFHeader) - 4; }
-
- bool currentVersion() const {
- return ( version == VERSION ) && ( versionMinor == VERSION_MINOR );
- }
-
- bool uninitialized() {
- if( version == 0 ) return true;
- return false;
- }
-
- Record* getRecord(DiskLoc dl) {
- int ofs = dl.getOfs();
- assert( ofs >= headerSize() );
- return (Record*) (((char *) this) + ofs);
- }
-
- void init(int fileno, int filelength) {
- if( uninitialized() ) {
- assert(filelength > 32768 );
- assert( headerSize() == 8192 );
- fileLength = filelength;
- version = VERSION;
- versionMinor = VERSION_MINOR;
- unused.setOfs( fileno, headerSize() );
- assert( (data-(char*)this) == headerSize() );
- unusedLength = fileLength - headerSize() - 16;
- memcpy(data+unusedLength, " \nthe end\n", 16);
- }
- }
+ int version;
+ int versionMinor;
+ int fileLength;
+ DiskLoc unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
+ int unusedLength;
+ char reserved[8192 - 4*4 - 8];
+
+ char data[4];
+
+ static int headerSize() {
+ return sizeof(PDFHeader) - 4;
+ }
+
+ bool currentVersion() const {
+ return ( version == VERSION ) && ( versionMinor == VERSION_MINOR );
+ }
+
+ bool uninitialized() {
+ if ( version == 0 ) return true;
+ return false;
+ }
+
+ Record* getRecord(DiskLoc dl) {
+ int ofs = dl.getOfs();
+ assert( ofs >= headerSize() );
+ return (Record*) (((char *) this) + ofs);
+ }
+
+ void init(int fileno, int filelength) {
+ if ( uninitialized() ) {
+ assert(filelength > 32768 );
+ assert( headerSize() == 8192 );
+ fileLength = filelength;
+ version = VERSION;
+ versionMinor = VERSION_MINOR;
+ unused.setOfs( fileno, headerSize() );
+ assert( (data-(char*)this) == headerSize() );
+ unusedLength = fileLength - headerSize() - 16;
+ memcpy(data+unusedLength, " \nthe end\n", 16);
+ }
+ }
};
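As a sanity check on the layout above (assuming 4-byte ints, an 8-byte DiskLoc and the tight packing implied by the surrounding #pragma pack):
    //   4 ints + DiskLoc + reserved + data
    //   = 16 + 8 + (8192 - 4*4 - 8) + 4 = 8196 bytes total,
    //   so headerSize() = 8196 - 4 = 8192: extents begin exactly 8KB into the file,
    //   matching the assert( headerSize() == 8192 ) in init(), which also leaves a
    //   16 byte " \nthe end\n" trailer at the very end of the file.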
#pragma pack(pop)
inline Extent* PhysicalDataFile::_getExtent(DiskLoc loc) {
- loc.assertOk();
- Extent *e = (Extent *) (((char *)header) + loc.getOfs());
- return e;
+ loc.assertOk();
+ Extent *e = (Extent *) (((char *)header) + loc.getOfs());
+ return e;
}
inline Extent* PhysicalDataFile::getExtent(DiskLoc loc) {
- Extent *e = _getExtent(loc);
- e->assertOk();
- return e;
+ Extent *e = _getExtent(loc);
+ e->assertOk();
+ return e;
}
#include "cursor.h"
-inline Record* PhysicalDataFile::recordAt(DiskLoc dl) { return header->getRecord(dl); }
+inline Record* PhysicalDataFile::recordAt(DiskLoc dl) {
+ return header->getRecord(dl);
+}
void sayDbContext(const char *msg = 0);
inline DiskLoc Record::getNext(const DiskLoc& myLoc) {
- if( nextOfs != DiskLoc::NullOfs ) {
- /* defensive */
- if( nextOfs >= 0 && nextOfs < 10 ) {
- sayDbContext("Assertion failure - Record::getNext() referencing a deleted record?");
- return DiskLoc();
- }
-
- return DiskLoc(myLoc.a(), nextOfs);
- }
- Extent *e = myExtent(myLoc);
- while( 1 ) {
- if( e->xnext.isNull() )
- return DiskLoc(); // end of table.
- e = e->xnext.ext();
- if( !e->firstRecord.isNull() )
- break;
- // entire extent could be empty, keep looking
- }
- return e->firstRecord;
+ if ( nextOfs != DiskLoc::NullOfs ) {
+ /* defensive */
+ if ( nextOfs >= 0 && nextOfs < 10 ) {
+ sayDbContext("Assertion failure - Record::getNext() referencing a deleted record?");
+ return DiskLoc();
+ }
+
+ return DiskLoc(myLoc.a(), nextOfs);
+ }
+ Extent *e = myExtent(myLoc);
+ while ( 1 ) {
+ if ( e->xnext.isNull() )
+ return DiskLoc(); // end of table.
+ e = e->xnext.ext();
+ if ( !e->firstRecord.isNull() )
+ break;
+ // entire extent could be empty, keep looking
+ }
+ return e->firstRecord;
}
inline DiskLoc Record::getPrev(const DiskLoc& myLoc) {
- if( prevOfs != DiskLoc::NullOfs )
- return DiskLoc(myLoc.a(), prevOfs);
- Extent *e = myExtent(myLoc);
- if( e->xprev.isNull() )
- return DiskLoc();
- return e->xprev.ext()->lastRecord;
+ if ( prevOfs != DiskLoc::NullOfs )
+ return DiskLoc(myLoc.a(), prevOfs);
+ Extent *e = myExtent(myLoc);
+ if ( e->xprev.isNull() )
+ return DiskLoc();
+ return e->xprev.ext()->lastRecord;
}
inline Record* DiskLoc::rec() const {
- return DataFileMgr::getRecord(*this);
+ return DataFileMgr::getRecord(*this);
}
inline BSONObj DiskLoc::obj() const {
- return BSONObj(rec());
+ return BSONObj(rec());
}
inline DeletedRecord* DiskLoc::drec() const {
- assert( fileNo != -1 );
- return (DeletedRecord*) rec();
+ assert( fileNo != -1 );
+ return (DeletedRecord*) rec();
}
inline Extent* DiskLoc::ext() const {
- return DataFileMgr::getExtent(*this);
+ return DataFileMgr::getExtent(*this);
}
-inline BtreeBucket* DiskLoc::btree() const {
- assert( fileNo != -1 );
- return (BtreeBucket*) rec()->data;
+inline BtreeBucket* DiskLoc::btree() const {
+ assert( fileNo != -1 );
+ return (BtreeBucket*) rec()->data;
}
-/*---------------------------------------------------------------------*/
+/*---------------------------------------------------------------------*/
#include "queryoptimizer.h"
#include "database.h"
@@ -329,78 +345,80 @@ inline BtreeBucket* DiskLoc::btree() const {
class FileOp {
public:
- virtual bool apply( const boost::filesystem::path &p ) = 0;
- virtual const char * op() const = 0;
+ virtual bool apply( const boost::filesystem::path &p ) = 0;
+ virtual const char * op() const = 0;
};
inline void _applyOpToDataFiles( const char *database, FileOp &fo, const char *path = dbpath ) {
- string c = database;
- c += '.';
- boost::filesystem::path p(path);
- boost::filesystem::path q;
- q = p / (c+"ns");
- bool ok = false;
- BOOST_CHECK_EXCEPTION( ok = fo.apply( q ) );
- if( ok )
- log() << fo.op() << " file " << q.string() << '\n';
- int i = 0;
- int extra = 10; // should not be necessary, this is defensive in case there are missing files
- while( 1 ) {
- assert( i <= DiskLoc::MaxFiles );
- stringstream ss;
- ss << c << i;
- q = p / ss.str();
- BOOST_CHECK_EXCEPTION( ok = fo.apply(q) );
- if( ok ) {
- log() << fo.op() << " file " << q.string() << '\n';
- if( extra != 10 )
- log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
- }
- else if( --extra <= 0 )
- break;
- i++;
- }
+ string c = database;
+ c += '.';
+ boost::filesystem::path p(path);
+ boost::filesystem::path q;
+ q = p / (c+"ns");
+ bool ok = false;
+ BOOST_CHECK_EXCEPTION( ok = fo.apply( q ) );
+ if ( ok )
+ log() << fo.op() << " file " << q.string() << '\n';
+ int i = 0;
+ int extra = 10; // should not be necessary, this is defensive in case there are missing files
+ while ( 1 ) {
+ assert( i <= DiskLoc::MaxFiles );
+ stringstream ss;
+ ss << c << i;
+ q = p / ss.str();
+ BOOST_CHECK_EXCEPTION( ok = fo.apply(q) );
+ if ( ok ) {
+ log() << fo.op() << " file " << q.string() << '\n';
+ if ( extra != 10 )
+ log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
+ }
+ else if ( --extra <= 0 )
+ break;
+ i++;
+ }
}
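FileOp plus _applyOpToDataFiles() form a tiny visitor over <db>.ns, <db>.0, <db>.1, ... ; a hypothetical counting op (not in the source) shows the shape of a custom FileOp:
    class CountingOp : public FileOp {
    public:
        int n;
        CountingOp() : n(0) { }
        virtual bool apply( const boost::filesystem::path &p ) {
            if ( !boost::filesystem::exists( p ) )
                return false;
            n++;
            return true;
        }
        virtual const char * op() const {
            return "counting";
        }
    };
    // CountingOp c; _applyOpToDataFiles("foo", c);   // visits foo.ns, foo.0, foo.1, ...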
-inline void _deleteDataFiles(const char *database) {
- class : public FileOp {
- virtual bool apply( const boost::filesystem::path &p ) {
- return boost::filesystem::remove( p );
- }
- virtual const char * op() const { return "remove"; }
- } deleter;
- _applyOpToDataFiles( database, deleter );
+inline void _deleteDataFiles(const char *database) {
+ class : public FileOp {
+ virtual bool apply( const boost::filesystem::path &p ) {
+ return boost::filesystem::remove( p );
+ }
+ virtual const char * op() const {
+ return "remove";
+ }
+ } deleter;
+ _applyOpToDataFiles( database, deleter );
}
-inline NamespaceIndex* nsindex(const char *ns) {
- DEV {
- char buf[256];
- nsToClient(ns, buf);
- if( database->name != buf ) {
- cout << "ERROR: attempt to write to wrong database database\n";
- cout << " ns:" << ns << '\n';
- cout << " database->name:" << database->name << endl;
- assert( database->name == buf );
- }
- }
- return &database->namespaceIndex;
+inline NamespaceIndex* nsindex(const char *ns) {
+ DEV {
+ char buf[256];
+ nsToClient(ns, buf);
+ if ( database->name != buf ) {
+            cout << "ERROR: attempt to write to wrong database\n";
+ cout << " ns:" << ns << '\n';
+ cout << " database->name:" << database->name << endl;
+ assert( database->name == buf );
+ }
+ }
+ return &database->namespaceIndex;
}
-inline NamespaceDetails* nsdetails(const char *ns) {
- return nsindex(ns)->details(ns);
+inline NamespaceDetails* nsdetails(const char *ns) {
+ return nsindex(ns)->details(ns);
}
-inline PhysicalDataFile& DiskLoc::pdf() const {
- assert( fileNo != -1 );
- return *database->getFile(fileNo);
+inline PhysicalDataFile& DiskLoc::pdf() const {
+ assert( fileNo != -1 );
+ return *database->getFile(fileNo);
}
inline Extent* DataFileMgr::getExtent(const DiskLoc& dl) {
- assert( dl.a() != -1 );
- return database->getFile(dl.a())->getExtent(dl);
+ assert( dl.a() != -1 );
+ return database->getFile(dl.a())->getExtent(dl);
}
inline Record* DataFileMgr::getRecord(const DiskLoc& dl) {
- assert( dl.a() != -1 );
- return database->getFile(dl.a())->recordAt(dl);
+ assert( dl.a() != -1 );
+ return database->getFile(dl.a())->recordAt(dl);
}
diff --git a/db/query.cpp b/db/query.cpp
index 3bdbf721a8f..1cea5f8fa4d 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -31,7 +31,7 @@
#include "replset.h"
#include "scanandorder.h"
-/* We cut off further objects once we cross this threshold; thus, you might get
+/* We cut off further objects once we cross this threshold; thus, you might get
a little bit more than this, it is a threshold rather than a limit.
*/
const int MaxBytesToReturnToClientAtOnce = 4 * 1024 * 1024;
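The distinction drawn in this comment -- threshold, not limit -- just means the size check happens after an object is appended; a minimal sketch (the two helpers are hypothetical, for illustration only):
    int bytesSoFar = 0;
    while ( moreResults() ) {                  // hypothetical helper
        bytesSoFar += appendNextObject();      // hypothetical helper: returns bytes appended
        if ( bytesSoFar > MaxBytesToReturnToClientAtOnce )
            break;                             // so the reply can exceed 4MB by at most one object
    }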
@@ -44,7 +44,7 @@ extern bool useCursors;
void appendElementHandlingGtLt(BSONObjBuilder& b, BSONElement& e);
-/* todo: _ cache query plans
+/* todo: _ cache query plans
_ use index on partial match with the query
parameters
@@ -53,158 +53,158 @@ void appendElementHandlingGtLt(BSONObjBuilder& b, BSONElement& e);
simpleKeyMatch - set to true if the query is purely for a single key value
unchanged otherwise.
*/
-auto_ptr<Cursor> getIndexCursor(const char *ns, BSONObj& query, BSONObj& order, bool *simpleKeyMatch = 0, bool *isSorted = 0, string *hint = 0) {
- NamespaceDetails *d = nsdetails(ns);
- if( d == 0 ) return auto_ptr<Cursor>();
+auto_ptr<Cursor> getIndexCursor(const char *ns, BSONObj& query, BSONObj& order, bool *simpleKeyMatch = 0, bool *isSorted = 0, string *hint = 0) {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 ) return auto_ptr<Cursor>();
- if( hint && !hint->empty() ) {
+ if ( hint && !hint->empty() ) {
/* todo: more work needed. doesn't handle $lt & $gt for example.
waiting for query optimizer rewrite (see queryoptimizer.h) before finishing the work.
*/
- for(int i = 0; i < d->nIndexes; i++ ) {
+ for (int i = 0; i < d->nIndexes; i++ ) {
IndexDetails& ii = d->indexes[i];
- if( ii.indexName() == *hint ) {
+ if ( ii.indexName() == *hint ) {
BSONObj startKey = ii.getKeyFromQuery(query);
int direction = 1;
- if( simpleKeyMatch )
+ if ( simpleKeyMatch )
*simpleKeyMatch = query.nFields() == startKey.nFields();
- if( isSorted ) *isSorted = false;
- return auto_ptr<Cursor>(
- new BtreeCursor(ii, startKey, direction, query));
+ if ( isSorted ) *isSorted = false;
+ return auto_ptr<Cursor>(
+ new BtreeCursor(ii, startKey, direction, query));
}
}
}
- if( !order.isEmpty() ) {
- set<string> orderFields;
- order.getFieldNames(orderFields);
- // order by
- for(int i = 0; i < d->nIndexes; i++ ) {
- BSONObj idxInfo = d->indexes[i].info.obj(); // { name:, ns:, key: }
- assert( strcmp(ns, idxInfo.getStringField("ns")) == 0 );
- BSONObj idxKey = idxInfo.getObjectField("key");
- set<string> keyFields;
- idxKey.getFieldNames(keyFields);
- if( keyFields == orderFields ) {
- bool reverse =
- order.firstElement().number() < 0;
- BSONObjBuilder b;
- DEV cout << " using index " << d->indexes[i].indexNamespace() << '\n';
- if( isSorted )
- *isSorted = true;
-
- return auto_ptr<Cursor>(new BtreeCursor(d->indexes[i], BSONObj(), reverse ? -1 : 1, query));
- }
- }
- }
-
- // queryFields, e.g. { 'name' }
- set<string> queryFields;
- query.getFieldNames(queryFields);
-
- // regular query without order by
- for(int i = 0; i < d->nIndexes; i++ ) {
- BSONObj idxInfo = d->indexes[i].info.obj(); // { name:, ns:, key: }
- BSONObj idxKey = idxInfo.getObjectField("key");
- set<string> keyFields;
- idxKey.getFieldNames(keyFields);
+ if ( !order.isEmpty() ) {
+ set<string> orderFields;
+ order.getFieldNames(orderFields);
+ // order by
+ for (int i = 0; i < d->nIndexes; i++ ) {
+ BSONObj idxInfo = d->indexes[i].info.obj(); // { name:, ns:, key: }
+ assert( strcmp(ns, idxInfo.getStringField("ns")) == 0 );
+ BSONObj idxKey = idxInfo.getObjectField("key");
+ set<string> keyFields;
+ idxKey.getFieldNames(keyFields);
+ if ( keyFields == orderFields ) {
+ bool reverse =
+ order.firstElement().number() < 0;
+ BSONObjBuilder b;
+ DEV cout << " using index " << d->indexes[i].indexNamespace() << '\n';
+ if ( isSorted )
+ *isSorted = true;
+
+ return auto_ptr<Cursor>(new BtreeCursor(d->indexes[i], BSONObj(), reverse ? -1 : 1, query));
+ }
+ }
+ }
+
+ // queryFields, e.g. { 'name' }
+ set<string> queryFields;
+ query.getFieldNames(queryFields);
+
+ // regular query without order by
+ for (int i = 0; i < d->nIndexes; i++ ) {
+ BSONObj idxInfo = d->indexes[i].info.obj(); // { name:, ns:, key: }
+ BSONObj idxKey = idxInfo.getObjectField("key");
+ set<string> keyFields;
+ idxKey.getFieldNames(keyFields);
// keyFields: e.g. { "name" }
bool match = keyFields == queryFields;
- if( 0 && !match && queryFields.size() > 1 && simpleKeyMatch == 0 && keyFields.size() == 1 ) {
+ if ( 0 && !match && queryFields.size() > 1 && simpleKeyMatch == 0 && keyFields.size() == 1 ) {
// TEMP
string s = *(keyFields.begin());
match = queryFields.count(s) == 1;
}
- if( match ) {
- bool simple = true;
- //BSONObjBuilder b;
- BSONObj q = query.extractFieldsUnDotted(idxKey);
+ if ( match ) {
+ bool simple = true;
+ //BSONObjBuilder b;
+ BSONObj q = query.extractFieldsUnDotted(idxKey);
assert(q.objsize() != 0); // guard against a seg fault if details is 0
- /* regexp: only supported if form is /^text/ */
- BSONObjBuilder b2;
- BSONObjIterator it(q);
- bool first = true;
- while( it.more() ) {
- BSONElement e = it.next();
- if( e.eoo() )
- break;
-
- // GT/LT
- if( e.type() == Object ) {
- int op = getGtLtOp(e);
- if( op ) {
- if( !first || !it.next().eoo() ) {
- // compound keys with GT/LT not supported yet via index.
- goto fail;
- }
- if( op >= JSMatcher::opIN ) {
- // $in does not use an index (at least yet, should when # of elems is tiny)
+ /* regexp: only supported if form is /^text/ */
+ BSONObjBuilder b2;
+ BSONObjIterator it(q);
+ bool first = true;
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ if ( e.eoo() )
+ break;
+
+ // GT/LT
+ if ( e.type() == Object ) {
+ int op = getGtLtOp(e);
+ if ( op ) {
+ if ( !first || !it.next().eoo() ) {
+ // compound keys with GT/LT not supported yet via index.
+ goto fail;
+ }
+ if ( op >= JSMatcher::opIN ) {
+ // $in does not use an index (at least yet, should when # of elems is tiny)
// likewise $ne
- goto fail;
- }
-
- {
- BSONObjIterator k(e.embeddedObject());
- k.next();
- if( !k.next().eoo() ) {
- /* compound query like { $lt : 9, $gt : 2 }
- for those our method below won't work.
- need more work on "stopOnMiss" in general -- may
- be issues with it. so fix this to use index after
- that is fixed.
- */
- OCCASIONALLY cout << "finish query optimizer for lt gt compound\n";
- goto fail;
- }
- }
-
- int direction = - JSMatcher::opDirection(op);
- return auto_ptr<Cursor>( new BtreeCursor(
- d->indexes[i],
- BSONObj(),
- direction, query) );
- }
- }
-
- first = false;
- if( e.type() == RegEx ) {
- simple = false;
- if( *e.regexFlags() )
- goto fail;
- const char *re = e.regex();
- const char *p = re;
- if( *p++ != '^' ) goto fail;
- while( *p ) {
- if( *p == ' ' || (*p>='0'&&*p<='9') || (*p>='@'&&*p<='Z') || (*p>='a'&&*p<='z') )
- ;
- else
- goto fail;
- p++;
- }
- if( it.more() && !it.next().eoo() ) // we must be the last part of the key (for now until we are smarter)
- goto fail;
- // ok!
+ goto fail;
+ }
+
+ {
+ BSONObjIterator k(e.embeddedObject());
+ k.next();
+ if ( !k.next().eoo() ) {
+ /* compound query like { $lt : 9, $gt : 2 }
+ for those our method below won't work.
+ need more work on "stopOnMiss" in general -- may
+ be issues with it. so fix this to use index after
+ that is fixed.
+ */
+ OCCASIONALLY cout << "finish query optimizer for lt gt compound\n";
+ goto fail;
+ }
+ }
+
+ int direction = - JSMatcher::opDirection(op);
+ return auto_ptr<Cursor>( new BtreeCursor(
+ d->indexes[i],
+ BSONObj(),
+ direction, query) );
+ }
+ }
+
+ first = false;
+ if ( e.type() == RegEx ) {
+ simple = false;
+ if ( *e.regexFlags() )
+ goto fail;
+ const char *re = e.regex();
+ const char *p = re;
+ if ( *p++ != '^' ) goto fail;
+ while ( *p ) {
+ if ( *p == ' ' || (*p>='0'&&*p<='9') || (*p>='@'&&*p<='Z') || (*p>='a'&&*p<='z') )
+ ;
+ else
+ goto fail;
+ p++;
+ }
+ if ( it.more() && !it.next().eoo() ) // we must be the last part of the key (for now until we are smarter)
+ goto fail;
+ // ok!
b2.append(e.fieldName(), re+1);
- break;
- }
- else {
- b2.append(e);
- //appendElementHandlingGtLt(b2, e);
- }
- }
- BSONObj q2 = b2.done();
- DEV cout << "using index " << d->indexes[i].indexNamespace() << endl;
- if( simple && simpleKeyMatch ) *simpleKeyMatch = true;
- return auto_ptr<Cursor>(
- new BtreeCursor(d->indexes[i], q2, 1, query));
- }
- }
+ break;
+ }
+ else {
+ b2.append(e);
+ //appendElementHandlingGtLt(b2, e);
+ }
+ }
+ BSONObj q2 = b2.done();
+ DEV cout << "using index " << d->indexes[i].indexNamespace() << endl;
+ if ( simple && simpleKeyMatch ) *simpleKeyMatch = true;
+ return auto_ptr<Cursor>(
+ new BtreeCursor(d->indexes[i], q2, 1, query));
+ }
+ }
fail:
- DEV cout << "getIndexCursor fail " << ns << '\n';
- return auto_ptr<Cursor>();
+ DEV cout << "getIndexCursor fail " << ns << '\n';
+ return auto_ptr<Cursor>();
}
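
For readers following the regex branch above: an anchored pattern with no flags and only plain characters can be turned into an index start key by stripping the caret; anything else falls back to a table scan. A rough standalone sketch of that acceptance test, using only the character classes the loop above checks (the helper name is invented for illustration, not part of the codebase):

    #include <cassert>
    #include <string>

    // Mirrors the regex screening in getIndexCursor(): usable only when the pattern
    // is anchored, carries no flags, and contains nothing but spaces, digits and
    // basic upper/lower-case letters.
    static bool regexUsableAsIndexPrefix(const std::string& re, const std::string& flags) {
        if ( !flags.empty() )
            return false;
        if ( re.empty() || re[0] != '^' )
            return false;
        for ( size_t i = 1; i < re.size(); i++ ) {
            char c = re[i];
            bool plain = c == ' ' || (c >= '0' && c <= '9') ||
                         (c >= '@' && c <= 'Z') || (c >= 'a' && c <= 'z');
            if ( !plain )
                return false;     // e.g. '.', '*', '(' would need a full scan
        }
        return true;
    }

    int main() {
        assert( regexUsableAsIndexPrefix("^joe", "") );     // start key becomes "joe"
        assert( !regexUsableAsIndexPrefix("joe", "") );     // not anchored
        assert( !regexUsableAsIndexPrefix("^joe.*", "") );  // metacharacter
        assert( !regexUsableAsIndexPrefix("^joe", "i") );   // flags present
        return 0;
    }
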
/* ns: namespace, e.g. <database>.<collection>
@@ -212,237 +212,239 @@ fail:
justOne: stop after 1 match
*/
int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool god) {
- if( strstr(ns, ".system.") && !god ) {
- /*if( strstr(ns, ".system.namespaces") ){
- cout << "info: delete on system namespace " << ns << '\n';
- }
- else if( strstr(ns, ".system.indexes") ) {
- cout << "info: delete on system namespace " << ns << '\n';
- }
- else*/ {
- cout << "ERROR: attempt to delete in system namespace " << ns << endl;
- return -1;
- }
- }
-
- int nDeleted = 0;
- BSONObj order;
- auto_ptr<Cursor> c = getIndexCursor(ns, pattern, order);
- if( c.get() == 0 )
- c = theDataFileMgr.findAll(ns);
+ if ( strstr(ns, ".system.") && !god ) {
+ /*if( strstr(ns, ".system.namespaces") ){
+ cout << "info: delete on system namespace " << ns << '\n';
+ }
+ else if( strstr(ns, ".system.indexes") ) {
+ cout << "info: delete on system namespace " << ns << '\n';
+ }
+ else*/ {
+ cout << "ERROR: attempt to delete in system namespace " << ns << endl;
+ return -1;
+ }
+ }
+
+ int nDeleted = 0;
+ BSONObj order;
+ auto_ptr<Cursor> c = getIndexCursor(ns, pattern, order);
+ if ( c.get() == 0 )
+ c = theDataFileMgr.findAll(ns);
JSMatcher matcher(pattern, c->indexKeyPattern());
- while( c->ok() ) {
- Record *r = c->_current();
- DiskLoc rloc = c->currLoc();
- BSONObj js(r);
+ while ( c->ok() ) {
+ Record *r = c->_current();
+ DiskLoc rloc = c->currLoc();
+ BSONObj js(r);
- bool deep;
- if( !matcher.matches(js, &deep) ) {
+ bool deep;
+ if ( !matcher.matches(js, &deep) ) {
c->advance(); // advance must be after noMoreMatches() because it uses currKey()
- }
- else {
+ }
+ else {
c->advance(); // must advance before deleting as the next ptr will die
- assert( !deep || !c->getsetdup(rloc) ); // can't be a dup, we deleted it!
- if( !justOne )
- c->noteLocation();
+ assert( !deep || !c->getsetdup(rloc) ); // can't be a dup, we deleted it!
+ if ( !justOne )
+ c->noteLocation();
- theDataFileMgr.deleteRecord(ns, r, rloc);
- nDeleted++;
- if( justOne )
- break;
- c->checkLocation();
- }
- }
+ theDataFileMgr.deleteRecord(ns, r, rloc);
+ nDeleted++;
+ if ( justOne )
+ break;
+ c->checkLocation();
+ }
+ }
- return nDeleted;
+ return nDeleted;
}
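
The "must advance before deleting" line above is the load-bearing detail of this loop: the cursor has to step past a record before that record is freed, or its next pointer is gone. A minimal standalone sketch of the same pattern, with a std::list standing in for the cursor (nothing here is the real Cursor API):

    #include <cassert>
    #include <list>

    // Same idea as deleteObjects(): move the iterator off the element first, then
    // erase the saved position, so the erase never invalidates the position we
    // continue from.
    static int deleteMatching(std::list<int>& records, int pattern) {
        int nDeleted = 0;
        std::list<int>::iterator it = records.begin();
        while ( it != records.end() ) {
            std::list<int>::iterator cur = it++;   // advance first; 'cur' is about to die
            if ( *cur == pattern ) {
                records.erase(cur);
                nDeleted++;
            }
        }
        return nDeleted;
    }

    int main() {
        std::list<int> rows;
        rows.push_back(1); rows.push_back(2); rows.push_back(2); rows.push_back(3);
        assert( deleteMatching(rows, 2) == 2 );
        assert( rows.size() == 2 );
        return 0;
    }
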
-struct Mod {
- enum Op { INC, SET } op;
- const char *fieldName;
+struct Mod {
+ enum Op { INC, SET } op;
+ const char *fieldName;
double *ndouble;
int *nint;
- void setn(double n) {
- if( ndouble ) *ndouble = n;
+ void setn(double n) {
+ if ( ndouble ) *ndouble = n;
else *nint = (int) n;
}
- double getn() { return ndouble ? *ndouble : *nint; }
+ double getn() {
+ return ndouble ? *ndouble : *nint;
+ }
int type;
- static void getMods(vector<Mod>& mods, BSONObj from);
- static void applyMods(vector<Mod>& mods, BSONObj obj);
+ static void getMods(vector<Mod>& mods, BSONObj from);
+ static void applyMods(vector<Mod>& mods, BSONObj obj);
};
-void Mod::applyMods(vector<Mod>& mods, BSONObj obj) {
- for( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ ) {
- Mod& m = *i;
- BSONElement e = obj.findElement(m.fieldName);
- if( e.isNumber() ) {
- if( m.op == INC ) {
+void Mod::applyMods(vector<Mod>& mods, BSONObj obj) {
+ for ( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ ) {
+ Mod& m = *i;
+ BSONElement e = obj.findElement(m.fieldName);
+ if ( e.isNumber() ) {
+ if ( m.op == INC ) {
e.setNumber( e.number() + m.getn() );
m.setn( e.number() );
- // *m.n = e.number() += *m.n;
+ // *m.n = e.number() += *m.n;
} else {
e.setNumber( m.getn() ); // $set or $SET
}
- }
- }
+ }
+ }
}
-/* get special operations like $inc
+/* get special operations like $inc
{ $inc: { a:1, b:1 } }
{ $set: { a:77 } }
NOTE: MODIFIES source from object!
*/
-void Mod::getMods(vector<Mod>& mods, BSONObj from) {
- BSONObjIterator it(from);
- while( it.more() ) {
- BSONElement e = it.next();
- const char *fn = e.fieldName();
- if( *fn == '$' && e.type() == Object &&
- fn[4] == 0 ) {
- BSONObj j = e.embeddedObject();
- BSONObjIterator jt(j);
- Op op = Mod::SET;
- if( strcmp("$inc",fn) == 0 ) {
- op = Mod::INC;
- // we rename to $SET instead of $set so that on an op like
- // { $set: {x:1}, $inc: {y:1} }
- // we don't get two "$set" fields which isn't allowed
- strcpy((char *) fn, "$SET");
- }
- while( jt.more() ) {
- BSONElement f = jt.next();
- if( f.eoo() )
- break;
- Mod m;
- m.op = op;
- m.fieldName = f.fieldName();
- if( f.isNumber() ) {
- if( f.type() == NumberDouble ) {
+void Mod::getMods(vector<Mod>& mods, BSONObj from) {
+ BSONObjIterator it(from);
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ const char *fn = e.fieldName();
+ if ( *fn == '$' && e.type() == Object &&
+ fn[4] == 0 ) {
+ BSONObj j = e.embeddedObject();
+ BSONObjIterator jt(j);
+ Op op = Mod::SET;
+ if ( strcmp("$inc",fn) == 0 ) {
+ op = Mod::INC;
+ // we rename to $SET instead of $set so that on an op like
+ // { $set: {x:1}, $inc: {y:1} }
+ // we don't get two "$set" fields which isn't allowed
+ strcpy((char *) fn, "$SET");
+ }
+ while ( jt.more() ) {
+ BSONElement f = jt.next();
+ if ( f.eoo() )
+ break;
+ Mod m;
+ m.op = op;
+ m.fieldName = f.fieldName();
+ if ( f.isNumber() ) {
+ if ( f.type() == NumberDouble ) {
m.ndouble = (double *) f.value();
m.nint = 0;
}
- else {
+ else {
m.ndouble = 0;
m.nint = (int *) f.value();
}
- mods.push_back( m );
- }
- }
- }
- }
+ mods.push_back( m );
+ }
+ }
+ }
+ }
}
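
To make the modifier plumbing above concrete: an update object such as { $inc: { hits: 1 }, $set: { state: 3 } } is flattened by getMods() into a list of (op, field, number) entries, and applyMods() rewrites the matching numeric fields in place. A simplified standalone model of those semantics (a plain map stands in for the BSON object; field names and values are invented for the example):

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    // Simplified model of the Mod vector: one entry per field touched by $inc/$set.
    struct ModSketch {
        enum Op { INC, SET } op;
        std::string fieldName;
        double n;
    };

    static void applyModsSketch(const std::vector<ModSketch>& mods,
                                std::map<std::string, double>& doc) {
        for ( size_t i = 0; i < mods.size(); i++ ) {
            const ModSketch& m = mods[i];
            if ( m.op == ModSketch::INC )
                doc[m.fieldName] += m.n;   // $inc adds to the existing number
            else
                doc[m.fieldName] = m.n;    // $set (renamed $SET internally) overwrites it
        }
    }

    int main() {
        std::map<std::string, double> doc;
        doc["hits"] = 4; doc["state"] = 1;
        std::vector<ModSketch> mods;
        ModSketch inc = { ModSketch::INC, "hits", 1 };
        ModSketch set = { ModSketch::SET, "state", 3 };
        mods.push_back(inc); mods.push_back(set);
        applyModsSketch(mods, doc);
        assert( doc["hits"] == 5 && doc["state"] == 3 );
        return 0;
    }
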
/* todo:
_ smart requery find record immediately
- returns:
+ returns:
2: we did applyMods() but didn't logOp()
- 5: we did applyMods() and did logOp() (so don't do it again)
+ 5: we did applyMods() and did logOp() (so don't do it again)
(clean these up later...)
*/
int _updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss, bool logop=false) {
- //cout << "TEMP BAD";
- //lrutest.find(updateobj);
-
- int profile = database->profile;
-
- // cout << "update ns:" << ns << " objsize:" << updateobj.objsize() << " queryobjsize:" <<
- // pattern.objsize();
-
- if( strstr(ns, ".system.") ) {
- cout << "\nERROR: attempt to update in system namespace " << ns << endl;
- ss << " can't update system namespace ";
- return 0;
- }
-
- int nscanned = 0;
- {
- BSONObj order;
- auto_ptr<Cursor> c = getIndexCursor(ns, pattern, order);
- if( c.get() == 0 )
- c = theDataFileMgr.findAll(ns);
- JSMatcher matcher(pattern, c->indexKeyPattern());
- while( c->ok() ) {
- Record *r = c->_current();
- nscanned++;
- BSONObj js(r);
- if( !matcher.matches(js) ) {
- }
- else {
- /* note: we only update one row and quit. if you do multiple later,
- be careful or multikeys in arrays could break things badly. best
- to only allow updating a single row with a multikey lookup.
- */
-
- if( profile )
- ss << " nscanned:" << nscanned;
-
- /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
- regular ones at the moment. */
- const char *firstField = updateobj.firstElement().fieldName();
- if( firstField[0] == '$' ) {
- vector<Mod> mods;
- Mod::getMods(mods, updateobj);
+ //cout << "TEMP BAD";
+ //lrutest.find(updateobj);
+
+ int profile = database->profile;
+
+ // cout << "update ns:" << ns << " objsize:" << updateobj.objsize() << " queryobjsize:" <<
+ // pattern.objsize();
+
+ if ( strstr(ns, ".system.") ) {
+ cout << "\nERROR: attempt to update in system namespace " << ns << endl;
+ ss << " can't update system namespace ";
+ return 0;
+ }
+
+ int nscanned = 0;
+ {
+ BSONObj order;
+ auto_ptr<Cursor> c = getIndexCursor(ns, pattern, order);
+ if ( c.get() == 0 )
+ c = theDataFileMgr.findAll(ns);
+ JSMatcher matcher(pattern, c->indexKeyPattern());
+ while ( c->ok() ) {
+ Record *r = c->_current();
+ nscanned++;
+ BSONObj js(r);
+ if ( !matcher.matches(js) ) {
+ }
+ else {
+ /* note: we only update one row and quit. if you do multiple later,
+ be careful or multikeys in arrays could break things badly. best
+ to only allow updating a single row with a multikey lookup.
+ */
+
+ if ( profile )
+ ss << " nscanned:" << nscanned;
+
+ /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
+ regular ones at the moment. */
+ const char *firstField = updateobj.firstElement().fieldName();
+ if ( firstField[0] == '$' ) {
+ vector<Mod> mods;
+ Mod::getMods(mods, updateobj);
NamespaceDetailsTransient& ndt = NamespaceDetailsTransient::get(ns);
set<string>& idxKeys = ndt.indexKeys();
- for( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ ) {
- if( idxKeys.count(i->fieldName) ) {
+ for ( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ ) {
+ if ( idxKeys.count(i->fieldName) ) {
uassert("can't $inc/$set an indexed field", false);
}
}
- Mod::applyMods(mods, c->currLoc().obj());
- if( profile )
- ss << " fastmod ";
- if( logop ) {
- if( mods.size() ) {
- logOp("u", ns, updateobj, &pattern, &upsert);
- return 5;
- }
- }
- return 2;
- }
-
- theDataFileMgr.update(ns, r, c->currLoc(), updateobj.objdata(), updateobj.objsize(), ss);
- return 1;
- }
- c->advance();
- }
- }
-
- if( profile )
- ss << " nscanned:" << nscanned;
-
- if( upsert ) {
- if( updateobj.firstElement().fieldName()[0] == '$' ) {
- /* upsert of an $inc. build a default */
- vector<Mod> mods;
- Mod::getMods(mods, updateobj);
- BSONObjBuilder b;
- b.appendElements(pattern);
- for( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ )
- b.append(i->fieldName, i->getn());
- BSONObj obj = b.done();
- theDataFileMgr.insert(ns, (void*) obj.objdata(), obj.objsize());
- if( profile )
- ss << " fastmodinsert ";
- return 3;
- }
- if( profile )
- ss << " upsert ";
- theDataFileMgr.insert(ns, (void*) updateobj.objdata(), updateobj.objsize());
- return 4;
- }
- return 0;
+ Mod::applyMods(mods, c->currLoc().obj());
+ if ( profile )
+ ss << " fastmod ";
+ if ( logop ) {
+ if ( mods.size() ) {
+ logOp("u", ns, updateobj, &pattern, &upsert);
+ return 5;
+ }
+ }
+ return 2;
+ }
+
+ theDataFileMgr.update(ns, r, c->currLoc(), updateobj.objdata(), updateobj.objsize(), ss);
+ return 1;
+ }
+ c->advance();
+ }
+ }
+
+ if ( profile )
+ ss << " nscanned:" << nscanned;
+
+ if ( upsert ) {
+ if ( updateobj.firstElement().fieldName()[0] == '$' ) {
+ /* upsert of an $inc. build a default */
+ vector<Mod> mods;
+ Mod::getMods(mods, updateobj);
+ BSONObjBuilder b;
+ b.appendElements(pattern);
+ for ( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ )
+ b.append(i->fieldName, i->getn());
+ BSONObj obj = b.done();
+ theDataFileMgr.insert(ns, (void*) obj.objdata(), obj.objsize());
+ if ( profile )
+ ss << " fastmodinsert ";
+ return 3;
+ }
+ if ( profile )
+ ss << " upsert ";
+ theDataFileMgr.insert(ns, (void*) updateobj.objdata(), updateobj.objsize());
+ return 4;
+ }
+ return 0;
}
-/* todo: we can optimize replication by just doing insert when an upsert triggers.
+/* todo: we can optimize replication by just doing insert when an upsert triggers.
*/
void updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss) {
- int rc = _updateObjects(ns, updateobj, pattern, upsert, ss, true);
- if( rc != 5 )
- logOp("u", ns, updateobj, &pattern, &upsert);
+ int rc = _updateObjects(ns, updateobj, pattern, upsert, ss, true);
+ if ( rc != 5 )
+ logOp("u", ns, updateobj, &pattern, &upsert);
}
int queryTraceLevel = 0;
@@ -452,31 +454,31 @@ int initialExtentSize(int len);
bool _runCommands(const char *ns, BSONObj& jsobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl);
-bool runCommands(const char *ns, BSONObj& jsobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl) {
- try {
- return _runCommands(ns, jsobj, ss, b, anObjBuilder, fromRepl);
- }
- catch( AssertionException e ) {
- if( !e.msg.empty() )
+bool runCommands(const char *ns, BSONObj& jsobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl) {
+ try {
+ return _runCommands(ns, jsobj, ss, b, anObjBuilder, fromRepl);
+ }
+ catch ( AssertionException e ) {
+ if ( !e.msg.empty() )
anObjBuilder.append("assertion", e.msg);
- }
- ss << " assertion ";
- anObjBuilder.append("errmsg", "db assertion failure");
- anObjBuilder.append("ok", 0.0);
- BSONObj x = anObjBuilder.done();
- b.append((void*) x.objdata(), x.objsize());
- return true;
+ }
+ ss << " assertion ";
+ anObjBuilder.append("errmsg", "db assertion failure");
+ anObjBuilder.append("ok", 0.0);
+ BSONObj x = anObjBuilder.done();
+ b.append((void*) x.objdata(), x.objsize());
+ return true;
}
int nCaught = 0;
void killCursors(int n, long long *ids) {
- int k = 0;
- for( int i = 0; i < n; i++ ) {
- if( ClientCursor::erase(ids[i]) )
- k++;
- }
- log() << "killCursors: found " << k << " of " << n << '\n';
+ int k = 0;
+ for ( int i = 0; i < n; i++ ) {
+ if ( ClientCursor::erase(ids[i]) )
+ k++;
+ }
+ log() << "killCursors: found " << k << " of " << n << '\n';
}
// order.$natural sets natural order direction
@@ -485,77 +487,77 @@ auto_ptr<Cursor> findTableScan(const char *ns, BSONObj& order, bool *isSorted=0)
BSONObj id_obj = fromjson("{_id:ObjId()}");
BSONObj empty_obj = fromjson("{}");
-/* { count: "collectionname"[, query: <query>] }
+/* { count: "collectionname"[, query: <query>] }
returns -1 on error.
*/
-int runCount(const char *ns, BSONObj& cmd, string& err) {
- NamespaceDetails *d = nsdetails(ns);
- if( d == 0 ) {
- err = "ns does not exist";
- return -1;
- }
-
- BSONObj query = cmd.getObjectField("query");
-
- if( query.isEmpty() ) {
- // count of all objects
- return (int) d->nrecords;
- }
-
- auto_ptr<Cursor> c;
-
- bool simpleKeyToMatch = false;
- c = getIndexCursor(ns, query, empty_obj, &simpleKeyToMatch);
-
- if( c.get() ) {
- if( simpleKeyToMatch ) {
- /* Here we only look at the btree keys to determine if a match, instead of looking
- into the records, which would be much slower.
- */
- int count = 0;
- BtreeCursor *bc = dynamic_cast<BtreeCursor *>(c.get());
- if( c->ok() && !query.woCompare( bc->currKeyNode().key, false ) ) {
- BSONObj firstMatch = bc->currKeyNode().key;
- count++;
- while ( c->advance() ) {
- if( firstMatch != bc->currKeyNode().key )
- break;
- count++;
- }
- }
- return count;
- }
- } else {
- c = findTableScan(ns, empty_obj);
- }
-
- int count = 0;
- auto_ptr<JSMatcher> matcher(new JSMatcher(query, c->indexKeyPattern()));
- while( c->ok() ) {
- BSONObj js = c->current();
- bool deep;
- if( !matcher->matches(js, &deep) ) {
- }
- else if( !deep || !c->getsetdup(c->currLoc()) ) { // i.e., check for dups on deep items only
- // got a match.
- count++;
- }
- c->advance();
- }
- return count;
+int runCount(const char *ns, BSONObj& cmd, string& err) {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 ) {
+ err = "ns does not exist";
+ return -1;
+ }
+
+ BSONObj query = cmd.getObjectField("query");
+
+ if ( query.isEmpty() ) {
+ // count of all objects
+ return (int) d->nrecords;
+ }
+
+ auto_ptr<Cursor> c;
+
+ bool simpleKeyToMatch = false;
+ c = getIndexCursor(ns, query, empty_obj, &simpleKeyToMatch);
+
+ if ( c.get() ) {
+ if ( simpleKeyToMatch ) {
+ /* Here we only look at the btree keys to determine if a match, instead of looking
+ into the records, which would be much slower.
+ */
+ int count = 0;
+ BtreeCursor *bc = dynamic_cast<BtreeCursor *>(c.get());
+ if ( c->ok() && !query.woCompare( bc->currKeyNode().key, false ) ) {
+ BSONObj firstMatch = bc->currKeyNode().key;
+ count++;
+ while ( c->advance() ) {
+ if ( firstMatch != bc->currKeyNode().key )
+ break;
+ count++;
+ }
+ }
+ return count;
+ }
+ } else {
+ c = findTableScan(ns, empty_obj);
+ }
+
+ int count = 0;
+ auto_ptr<JSMatcher> matcher(new JSMatcher(query, c->indexKeyPattern()));
+ while ( c->ok() ) {
+ BSONObj js = c->current();
+ bool deep;
+ if ( !matcher->matches(js, &deep) ) {
+ }
+ else if ( !deep || !c->getsetdup(c->currLoc()) ) { // i.e., check for dups on deep items only
+ // got a match.
+ count++;
+ }
+ c->advance();
+ }
+ return count;
}
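
The simpleKeyToMatch branch above is the interesting part of runCount(): when the index key alone answers the query, the count comes from walking adjacent equal keys in the btree without touching any record. A standalone sketch of that fast path, with a sorted vector standing in for the index (names and data invented for the example):

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    // Count by scanning equal keys from the first match, as the BtreeCursor loop
    // above does; stop at the first key that differs.
    static int countEqualKeys(const std::vector<std::string>& indexKeys,
                              const std::string& queryKey) {
        std::vector<std::string>::const_iterator it =
            std::lower_bound(indexKeys.begin(), indexKeys.end(), queryKey);
        int count = 0;
        while ( it != indexKeys.end() && *it == queryKey ) {
            count++;
            ++it;
        }
        return count;
    }

    int main() {
        std::vector<std::string> keys;
        keys.push_back("alice"); keys.push_back("bob");
        keys.push_back("bob");   keys.push_back("carol");
        // roughly what { count: "people", query: { name: "bob" } } would resolve to
        assert( countEqualKeys(keys, "bob") == 2 );
        assert( countEqualKeys(keys, "dave") == 0 );
        return 0;
    }
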
/* This is for languages whose "objects" are not well ordered (JSON is well ordered).
- [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
+ [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
*/
-inline BSONObj transformOrderFromArrayFormat(BSONObj order) {
+inline BSONObj transformOrderFromArrayFormat(BSONObj order) {
/* note: this is slow, but that is ok as order will have very few pieces */
BSONObjBuilder b;
char p[2] = "0";
- while( 1 ) {
+ while ( 1 ) {
BSONObj j = order.getObjectField(p);
- if( j.isEmpty() )
+ if ( j.isEmpty() )
break;
BSONElement e = j.firstElement();
uassert("bad order array", !e.eoo());
@@ -568,41 +570,41 @@ inline BSONObj transformOrderFromArrayFormat(BSONObj order) {
return b.doneAndDecouple();
}
-QueryResult* runQuery(Message& message, const char *ns, int ntoskip, int _ntoreturn, BSONObj jsobj,
- auto_ptr< set<string> > filter, stringstream& ss, int queryOptions)
+QueryResult* runQuery(Message& message, const char *ns, int ntoskip, int _ntoreturn, BSONObj jsobj,
+ auto_ptr< set<string> > filter, stringstream& ss, int queryOptions)
{
Timer t;
int nscanned = 0;
- bool wantMore = true;
- int ntoreturn = _ntoreturn;
- if( _ntoreturn < 0 ) {
- ntoreturn = -_ntoreturn;
- wantMore = false;
- }
- ss << "query " << ns << " ntoreturn:" << ntoreturn;
-
- int n = 0;
- BufBuilder b(32768);
- BSONObjBuilder cmdResBuf;
- long long cursorid = 0;
-
- b.skip(sizeof(QueryResult));
-
- /* we assume you are using findOne() for running a cmd... */
- if( ntoreturn == 1 && runCommands(ns, jsobj, ss, b, cmdResBuf, false) ) {
- n = 1;
- }
- else {
+ bool wantMore = true;
+ int ntoreturn = _ntoreturn;
+ if ( _ntoreturn < 0 ) {
+ ntoreturn = -_ntoreturn;
+ wantMore = false;
+ }
+ ss << "query " << ns << " ntoreturn:" << ntoreturn;
+
+ int n = 0;
+ BufBuilder b(32768);
+ BSONObjBuilder cmdResBuf;
+ long long cursorid = 0;
+
+ b.skip(sizeof(QueryResult));
+
+ /* we assume you are using findOne() for running a cmd... */
+ if ( ntoreturn == 1 && runCommands(ns, jsobj, ss, b, cmdResBuf, false) ) {
+ n = 1;
+ }
+ else {
uassert("not master", isMaster() || (queryOptions & Option_SlaveOk));
string hint;
bool explain = false;
bool _gotquery = false;
- BSONObj query;// = jsobj.getObjectField("query");
+ BSONObj query;// = jsobj.getObjectField("query");
{
BSONElement e = jsobj.findElement("query");
- if( !e.eoo() && (e.type() == Object || e.type() == Array) ) {
+ if ( !e.eoo() && (e.type() == Object || e.type() == Array) ) {
query = e.embeddedObject();
_gotquery = true;
}
@@ -610,264 +612,264 @@ QueryResult* runQuery(Message& message, const char *ns, int ntoskip, int _ntoret
BSONObj order;
{
BSONElement e = jsobj.findElement("orderby");
- if( !e.eoo() ) {
+ if ( !e.eoo() ) {
order = e.embeddedObjectUserCheck();
- if( e.type() == Array )
+ if ( e.type() == Array )
order = transformOrderFromArrayFormat(order);
}
}
- if( !_gotquery && order.isEmpty() )
- query = jsobj;
+ if ( !_gotquery && order.isEmpty() )
+ query = jsobj;
else {
explain = jsobj.getBoolField("$explain");
hint = jsobj.getStringField("$hint");
}
- /* The ElemIter will not be happy if this isn't really an object. So throw exception
- here when that is true.
- (Which may indicate bad data from appserver?)
- */
- if( query.objsize() == 0 ) {
- cout << "Bad query object?\n jsobj:";
- cout << jsobj.toString() << "\n query:";
- cout << query.toString() << endl;
+ /* The ElemIter will not be happy if this isn't really an object. So throw exception
+ here when that is true.
+ (Which may indicate bad data from appserver?)
+ */
+ if ( query.objsize() == 0 ) {
+ cout << "Bad query object?\n jsobj:";
+ cout << jsobj.toString() << "\n query:";
+ cout << query.toString() << endl;
uassert("bad query object", false);
- }
-
- bool isSorted = false;
- auto_ptr<Cursor> c = getSpecialCursor(ns);
-
- if( c.get() == 0 )
- c = getIndexCursor(ns, query, order, 0, &isSorted, &hint);
- if( c.get() == 0 )
- c = findTableScan(ns, order, &isSorted);
-
- auto_ptr<JSMatcher> matcher(new JSMatcher(query, c->indexKeyPattern()));
- JSMatcher &debug1 = *matcher;
- assert( debug1.getN() < 1000 );
-
- auto_ptr<ScanAndOrder> so;
- bool ordering = false;
- if( !order.isEmpty() && !isSorted ) {
- ordering = true;
- ss << " scanAndOrder ";
- so = auto_ptr<ScanAndOrder>(new ScanAndOrder(ntoskip, ntoreturn,order));
- wantMore = false;
- // scanAndOrder(b, c.get(), order, ntoreturn);
- }
-
- while( c->ok() ) {
- BSONObj js = c->current();
- //if( queryTraceLevel >= 50 )
- // cout << " checking against:\n " << js.toString() << endl;
- nscanned++;
- bool deep;
- if( !matcher->matches(js, &deep) ) {
- }
- else if( !deep || !c->getsetdup(c->currLoc()) ) { // i.e., check for dups on deep items only
- // got a match.
+ }
+
+ bool isSorted = false;
+ auto_ptr<Cursor> c = getSpecialCursor(ns);
+
+ if ( c.get() == 0 )
+ c = getIndexCursor(ns, query, order, 0, &isSorted, &hint);
+ if ( c.get() == 0 )
+ c = findTableScan(ns, order, &isSorted);
+
+ auto_ptr<JSMatcher> matcher(new JSMatcher(query, c->indexKeyPattern()));
+ JSMatcher &debug1 = *matcher;
+ assert( debug1.getN() < 1000 );
+
+ auto_ptr<ScanAndOrder> so;
+ bool ordering = false;
+ if ( !order.isEmpty() && !isSorted ) {
+ ordering = true;
+ ss << " scanAndOrder ";
+ so = auto_ptr<ScanAndOrder>(new ScanAndOrder(ntoskip, ntoreturn,order));
+ wantMore = false;
+ // scanAndOrder(b, c.get(), order, ntoreturn);
+ }
+
+ while ( c->ok() ) {
+ BSONObj js = c->current();
+ //if( queryTraceLevel >= 50 )
+ // cout << " checking against:\n " << js.toString() << endl;
+ nscanned++;
+ bool deep;
+ if ( !matcher->matches(js, &deep) ) {
+ }
+ else if ( !deep || !c->getsetdup(c->currLoc()) ) { // i.e., check for dups on deep items only
+ // got a match.
assert( js.objsize() >= 0 ); //defensive for segfaults
- if( ordering ) {
+ if ( ordering ) {
// note: no cursors for non-indexed, ordered results. results must be fairly small.
so->add(js);
}
- else if( ntoskip > 0 ) {
+ else if ( ntoskip > 0 ) {
ntoskip--;
- } else {
- if( explain ) {
+ } else {
+ if ( explain ) {
n++;
- if( n >= ntoreturn && !wantMore )
+ if ( n >= ntoreturn && !wantMore )
break; // .limit() was used, show just that much.
}
else {
bool ok = fillQueryResultFromObj(b, filter.get(), js);
- if( ok ) n++;
- if( ok ) {
- if( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
- (ntoreturn==0 && (b.len()>1*1024*1024 || n>=101)) ) {
- /* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
- is only a size limit. The idea is that on a find() where one doesn't use much results,
- we don't return much, but once getmore kicks in, we start pushing significant quantities.
-
- The n limit (vs. size) is important when someone fetches only one small field from big
- objects, which causes massive scanning server-side.
- */
- /* if only 1 requested, no cursor saved for efficiency...we assume it is findOne() */
- if( wantMore && ntoreturn != 1 ) {
- if( useCursors ) {
- c->advance();
- if( c->ok() ) {
- // more...so save a cursor
- ClientCursor *cc = new ClientCursor();
- cc->c = c;
- cursorid = cc->cursorid;
- DEV cout << " query has more, cursorid: " << cursorid << endl;
- //cc->pattern = query;
- cc->matcher = matcher;
- cc->ns = ns;
- cc->pos = n;
- cc->filter = filter;
- cc->originalMessage = message;
- cc->updateLocation();
- }
+ if ( ok ) n++;
+ if ( ok ) {
+ if ( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
+ (ntoreturn==0 && (b.len()>1*1024*1024 || n>=101)) ) {
+ /* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
+ is only a size limit. The idea is that on a find() where one doesn't use much results,
+ we don't return much, but once getmore kicks in, we start pushing significant quantities.
+
+ The n limit (vs. size) is important when someone fetches only one small field from big
+ objects, which causes massive scanning server-side.
+ */
+ /* if only 1 requested, no cursor saved for efficiency...we assume it is findOne() */
+ if ( wantMore && ntoreturn != 1 ) {
+ if ( useCursors ) {
+ c->advance();
+ if ( c->ok() ) {
+ // more...so save a cursor
+ ClientCursor *cc = new ClientCursor();
+ cc->c = c;
+ cursorid = cc->cursorid;
+ DEV cout << " query has more, cursorid: " << cursorid << endl;
+ //cc->pattern = query;
+ cc->matcher = matcher;
+ cc->ns = ns;
+ cc->pos = n;
+ cc->filter = filter;
+ cc->originalMessage = message;
+ cc->updateLocation();
}
}
- break;
+ }
+ break;
}
}
}
}
- }
- c->advance();
- } // end while
+ }
+ c->advance();
+ } // end while
- if( explain ) {
+ if ( explain ) {
BSONObjBuilder builder;
builder.append("cursor", c->toString());
builder.append("nscanned", nscanned);
builder.append("n", ordering ? so->size() : n);
- if( ordering )
+ if ( ordering )
builder.append("scanAndOrder", true);
builder.append("millis", t.millis());
BSONObj obj = builder.done();
fillQueryResultFromObj(b, 0, obj);
n = 1;
- } else if( ordering ) {
+ } else if ( ordering ) {
so->fill(b, filter.get(), n);
- }
- else if( cursorid == 0 && (queryOptions & Option_CursorTailable) && c->tailable() ) {
- c->setAtTail();
- ClientCursor *cc = new ClientCursor();
- cc->c = c;
- cursorid = cc->cursorid;
- DEV cout << " query has no more but tailable, cursorid: " << cursorid << endl;
+ }
+ else if ( cursorid == 0 && (queryOptions & Option_CursorTailable) && c->tailable() ) {
+ c->setAtTail();
+ ClientCursor *cc = new ClientCursor();
+ cc->c = c;
+ cursorid = cc->cursorid;
+ DEV cout << " query has no more but tailable, cursorid: " << cursorid << endl;
//cc->pattern = query;
- cc->matcher = matcher;
- cc->ns = ns;
- cc->pos = n;
- cc->filter = filter;
- cc->originalMessage = message;
- cc->updateLocation();
- }
- }
-
- QueryResult *qr = (QueryResult *) b.buf();
- qr->_data[0] = 0;
- qr->_data[1] = 0;
- qr->_data[2] = 0;
- qr->_data[3] = 0;
- qr->len = b.len();
- ss << " reslen:" << b.len();
- // qr->channel = 0;
- qr->setOperation(opReply);
- qr->cursorId = cursorid;
- qr->startingFrom = 0;
- qr->nReturned = n;
- b.decouple();
+ cc->matcher = matcher;
+ cc->ns = ns;
+ cc->pos = n;
+ cc->filter = filter;
+ cc->originalMessage = message;
+ cc->updateLocation();
+ }
+ }
+
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->_data[0] = 0;
+ qr->_data[1] = 0;
+ qr->_data[2] = 0;
+ qr->_data[3] = 0;
+ qr->len = b.len();
+ ss << " reslen:" << b.len();
+ // qr->channel = 0;
+ qr->setOperation(opReply);
+ qr->cursorId = cursorid;
+ qr->startingFrom = 0;
+ qr->nReturned = n;
+ b.decouple();
int duration = t.millis();
- if( (database && database->profile) || duration >= 100 ) {
+ if ( (database && database->profile) || duration >= 100 ) {
ss << " nscanned:" << nscanned << ' ';
- if( ntoskip )
- ss << " ntoskip:" << ntoskip;
- if( database && database->profile )
+ if ( ntoskip )
+ ss << " ntoskip:" << ntoskip;
+ if ( database && database->profile )
ss << " <br>query: ";
ss << jsobj.toString() << ' ';
- }
- ss << " nreturned:" << n;
- return qr;
+ }
+ ss << " nreturned:" << n;
+ return qr;
}
//int dump = 0;
/* empty result for error conditions */
QueryResult* emptyMoreResult(long long cursorid) {
- BufBuilder b(32768);
- b.skip(sizeof(QueryResult));
- QueryResult *qr = (QueryResult *) b.buf();
- qr->cursorId = 0; // 0 indicates no more data to retrieve.
- qr->startingFrom = 0;
- qr->len = b.len();
- qr->setOperation(opReply);
- qr->nReturned = 0;
- b.decouple();
- return qr;
+ BufBuilder b(32768);
+ b.skip(sizeof(QueryResult));
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->cursorId = 0; // 0 indicates no more data to retrieve.
+ qr->startingFrom = 0;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->nReturned = 0;
+ b.decouple();
+ return qr;
}
QueryResult* getMore(const char *ns, int ntoreturn, long long cursorid) {
- BufBuilder b(32768);
-
- ClientCursor *cc = ClientCursor::find(cursorid);
-
- b.skip(sizeof(QueryResult));
-
- int resultFlags = 0;
- int start = 0;
- int n = 0;
-
- if( !cc ) {
- DEV log() << "getMore: cursorid not found " << ns << " " << cursorid << endl;
- cursorid = 0;
- resultFlags = ResultFlag_CursorNotFound;
- }
- else {
- start = cc->pos;
- Cursor *c = cc->c.get();
- c->checkLocation();
- c->tailResume();
- while( 1 ) {
- if( !c->ok() ) {
- if( c->tailing() ) {
- c->setAtTail();
- break;
- }
- DEV log() << " getmore: last batch, erasing cursor " << cursorid << endl;
- bool ok = ClientCursor::erase(cursorid);
- assert(ok);
- cursorid = 0;
- cc = 0;
- break;
- }
- BSONObj js = c->current();
-
- bool deep;
- if( !cc->matcher->matches(js, &deep) ) {
- }
- else {
- //cout << "matches " << c->currLoc().toString() << ' ' << deep << '\n';
- if( deep && c->getsetdup(c->currLoc()) ) {
- //cout << " but it's a dup \n";
- }
- else {
- bool ok = fillQueryResultFromObj(b, cc->filter.get(), js);
- if( ok ) {
- n++;
- if( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
- (ntoreturn==0 && b.len()>1*1024*1024) ) {
- c->advance();
- if( c->tailing() && !c->ok() )
- c->setAtTail();
- cc->pos += n;
- //cc->updateLocation();
- break;
- }
- }
- }
- }
- c->advance();
- }
- if( cc )
+ BufBuilder b(32768);
+
+ ClientCursor *cc = ClientCursor::find(cursorid);
+
+ b.skip(sizeof(QueryResult));
+
+ int resultFlags = 0;
+ int start = 0;
+ int n = 0;
+
+ if ( !cc ) {
+ DEV log() << "getMore: cursorid not found " << ns << " " << cursorid << endl;
+ cursorid = 0;
+ resultFlags = ResultFlag_CursorNotFound;
+ }
+ else {
+ start = cc->pos;
+ Cursor *c = cc->c.get();
+ c->checkLocation();
+ c->tailResume();
+ while ( 1 ) {
+ if ( !c->ok() ) {
+ if ( c->tailing() ) {
+ c->setAtTail();
+ break;
+ }
+ DEV log() << " getmore: last batch, erasing cursor " << cursorid << endl;
+ bool ok = ClientCursor::erase(cursorid);
+ assert(ok);
+ cursorid = 0;
+ cc = 0;
+ break;
+ }
+ BSONObj js = c->current();
+
+ bool deep;
+ if ( !cc->matcher->matches(js, &deep) ) {
+ }
+ else {
+ //cout << "matches " << c->currLoc().toString() << ' ' << deep << '\n';
+ if ( deep && c->getsetdup(c->currLoc()) ) {
+ //cout << " but it's a dup \n";
+ }
+ else {
+ bool ok = fillQueryResultFromObj(b, cc->filter.get(), js);
+ if ( ok ) {
+ n++;
+ if ( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
+ (ntoreturn==0 && b.len()>1*1024*1024) ) {
+ c->advance();
+ if ( c->tailing() && !c->ok() )
+ c->setAtTail();
+ cc->pos += n;
+ //cc->updateLocation();
+ break;
+ }
+ }
+ }
+ }
+ c->advance();
+ }
+ if ( cc )
cc->updateLocation();
- }
-
- QueryResult *qr = (QueryResult *) b.buf();
- qr->len = b.len();
- qr->setOperation(opReply);
- qr->resultFlags() = resultFlags;
- qr->cursorId = cursorid;
- qr->startingFrom = start;
- qr->nReturned = n;
- b.decouple();
-
- return qr;
+ }
+
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->resultFlags() = resultFlags;
+ qr->cursorId = cursorid;
+ qr->startingFrom = start;
+ qr->nReturned = n;
+ b.decouple();
+
+ return qr;
}
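
The cut-off logic spread across runQuery() and getMore() above follows one rule: an explicit ntoreturn wins; otherwise the first batch stops at 101 objects or roughly 1MB, and later getMore batches stop on size alone. A small sketch of that decision, with MaxBytesToReturnToClientAtOnce assumed to be 4MB purely for illustration:

    #include <iostream>

    static const int maxBytesToReturnAtOnce = 4 * 1024 * 1024;  // assumed value, illustration only

    // Returns true when the current batch should be closed out and sent.
    static bool batchIsFull(int ntoreturn, int nReturned, int bytesInBuffer, bool firstBatch) {
        if ( ntoreturn > 0 )   // caller asked for an explicit batch size
            return nReturned >= ntoreturn || bytesInBuffer > maxBytesToReturnAtOnce;
        if ( firstBatch )      // default first batch: 101 objects or ~1MB
            return nReturned >= 101 || bytesInBuffer > 1*1024*1024;
        return bytesInBuffer > 1*1024*1024;   // getMore: size limit only
    }

    int main() {
        std::cout << batchIsFull(0, 101, 4096, true)  << '\n';  // 1: object limit reached
        std::cout << batchIsFull(0, 101, 4096, false) << '\n';  // 0: getMore keeps going
        std::cout << batchIsFull(5, 5, 0, true)       << '\n';  // 1: explicit limit reached
        return 0;
    }
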
diff --git a/db/query.h b/db/query.h
index 1e0343f8f90..eff237ff2ad 100644
--- a/db/query.h
+++ b/db/query.h
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -23,7 +23,7 @@
#include "jsobj.h"
#include "storage.h"
-/* db request message format
+/* db request message format
      unsigned opid;         // arbitrary; will be echoed back
byte operation;
@@ -33,7 +33,7 @@
dbInsert:
string collection;
- a series of JSObjects
+ a series of JSObjects
dbDelete:
string collection;
int flags=0; // 1=DeleteSingle
@@ -76,7 +76,7 @@
*/
/* the field 'resultFlags' above */
-enum {
+enum {
/* returned, with zero results, when getMore is called but the cursor id is not valid at the server. */
ResultFlag_CursorNotFound = 1
};
@@ -88,9 +88,9 @@ enum {
QueryResult* getMore(const char *ns, int ntoreturn, long long cursorid);
// caller must free() returned QueryResult.
-QueryResult* runQuery(Message&, const char *ns, int ntoskip, int ntoreturn,
- BSONObj j, auto_ptr< set<string> > fieldFilter,
- stringstream&, int queryOptions);
+QueryResult* runQuery(Message&, const char *ns, int ntoskip, int ntoreturn,
+ BSONObj j, auto_ptr< set<string> > fieldFilter,
+ stringstream&, int queryOptions);
void updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss);
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index 4ae09d7d1cf..88e930b50bd 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -2,16 +2,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -31,10 +31,10 @@
#include "queryoptimizer.h"
QueryPlan QueryOptimizer::getPlan(
- const char *ns,
- BSONObj* query,
- BSONObj* order,
- BSONObj* hint)
+ const char *ns,
+ BSONObj* query,
+ BSONObj* order,
+ BSONObj* hint)
{
QueryPlan plan;
diff --git a/db/queryoptimizer.h b/db/queryoptimizer.h
index 40fca9e5ea1..3c41928e7e3 100644
--- a/db/queryoptimizer.h
+++ b/db/queryoptimizer.h
@@ -2,25 +2,25 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#pragma once
+#pragma once
-class QueryPlan {
+class QueryPlan {
public:
- QueryPlan() {
+ QueryPlan() {
scanAndOrderRequired = false;
simpleKeyMatch = false;
}
@@ -30,16 +30,16 @@ public:
/* ScanAndOrder processing will be required if true */
bool scanAndOrderRequired;
- /* When true, the index we are using has keys such that it can completely resolve the
+ /* When true, the index we are using has keys such that it can completely resolve the
query expression to match by itself without ever checking the main object.
*/
bool simpleKeyMatch;
};
-/* We put these objects inside the Database objects: that way later if we want to do
+/* We put these objects inside the Database objects: that way later if we want to do
stats, it's in the right place.
*/
-class QueryOptimizer {
+class QueryOptimizer {
public:
QueryPlan getPlan(
const char *ns,
diff --git a/db/repl.cpp b/db/repl.cpp
index d3c928b98d3..636a1db36bb 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -9,16 +9,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -31,7 +31,7 @@
{ haveLogged : true }
local.pair.startup - can contain a special value indicating for a pair that we have the master copy.
used when replacing other half of the pair which has permanently failed.
- local.pair.sync - { initialsynccomplete: 1 }
+ local.pair.sync - { initialsynccomplete: 1 }
*/
#include "stdafx.h"
@@ -57,44 +57,46 @@ void ensureHaveIdIndex(const char *ns);
/* if 1 sync() is running */
int syncing = 0;
-/* if true replace our peer in a replication pair -- don't worry about if his
+/* if true replace our peer in a replication pair -- don't worry about if his
local.oplog.$main is empty.
*/
bool replacePeer = false;
-/* "dead" means something really bad happened like replication falling completely out of sync.
+/* "dead" means something really bad happened like replication falling completely out of sync.
when non-null, we are dead and the string is informational
*/
const char *allDead = 0;
-/* This is set to true if we have EVER been up to date -- this way a new pair member
+/* This is set to true if we have EVER been up to date -- this way a new pair member
which is a replacement won't go online as master until we have initially fully synced.
*/
-class PairSync {
+class PairSync {
int initialsynccomplete;
public:
- PairSync() { initialsynccomplete = -1; }
+ PairSync() {
+ initialsynccomplete = -1;
+ }
- /* call before using the class. from dbmutex */
+ /* call before using the class. from dbmutex */
void init() {
BSONObj o;
initialsynccomplete = 0;
- if( getSingleton("local.pair.sync", o) )
+ if ( getSingleton("local.pair.sync", o) )
initialsynccomplete = 1;
}
- bool initialSyncCompleted() {
- return initialsynccomplete != 0;
+ bool initialSyncCompleted() {
+ return initialsynccomplete != 0;
}
- void setInitialSyncCompleted() {
+ void setInitialSyncCompleted() {
BSONObj o = fromjson("{initialsynccomplete:1}");
putSingleton("local.pair.sync", o);
initialsynccomplete = 1;
}
- void setInitialSyncCompletedLocking() {
- if( initialsynccomplete == 1 )
+ void setInitialSyncCompletedLocking() {
+ if ( initialsynccomplete == 1 )
return;
dblock lk;
BSONObj o = fromjson("{initialsynccomplete:1}");
@@ -102,12 +104,14 @@ public:
initialsynccomplete = 1;
}
} pairSync;
-bool getInitialSyncCompleted() { return pairSync.initialSyncCompleted(); }
+bool getInitialSyncCompleted() {
+ return pairSync.initialSyncCompleted();
+}
#include "replset.h"
#define debugrepl(z) cout << "debugrepl " << z << '\n'
-//define debugrepl
+//define debugrepl
/* --- ReplPair -------------------------------- */
@@ -115,25 +119,29 @@ ReplPair *replPair = 0;
/* output by the web console */
const char *replInfo = "";
-struct ReplInfo {
- ReplInfo(const char *msg) { replInfo = msg; }
- ~ReplInfo() { replInfo = "?"; }
+struct ReplInfo {
+ ReplInfo(const char *msg) {
+ replInfo = msg;
+ }
+ ~ReplInfo() {
+ replInfo = "?";
+ }
};
-void ReplPair::setMaster(int n, const char *_comment ) {
- if ( n == State_Master && !pairSync.initialSyncCompleted() )
- return;
- info = _comment;
- if( n != state && !quiet )
- log() << "pair: setting master=" << n << " was " << state << '\n';
- state = n;
+void ReplPair::setMaster(int n, const char *_comment ) {
+ if ( n == State_Master && !pairSync.initialSyncCompleted() )
+ return;
+ info = _comment;
+ if ( n != state && !quiet )
+ log() << "pair: setting master=" << n << " was " << state << '\n';
+ state = n;
}
/* peer unreachable, try our arbiter */
void ReplPair::arbitrate() {
ReplInfo r("arbitrate");
- if( arbHost == "-" ) {
+ if ( arbHost == "-" ) {
// no arbiter. we are up, let's assume he is down and network is not partitioned.
setMasterLocked(State_Master, "remote unreachable");
return;
@@ -141,15 +149,15 @@ void ReplPair::arbitrate() {
auto_ptr<DBClientConnection> conn( newClientConnection() );
string errmsg;
- if( !conn->connect(arbHost.c_str(), errmsg) ) {
+ if ( !conn->connect(arbHost.c_str(), errmsg) ) {
setMasterLocked(State_CantArb, "can't connect to arb");
return;
}
bool is_master;
BSONObj res = conn->cmdIsMaster(is_master);
- /*findOne("admin.$cmd", ismasterobj);*/
- if( res.isEmpty() ) {
+ /*findOne("admin.$cmd", ismasterobj);*/
+ if ( res.isEmpty() ) {
setMasterLocked(State_CantArb, "can't arb 2");
return;
}
@@ -159,35 +167,41 @@ void ReplPair::arbitrate() {
/* --------------------------------------------- */
-class CmdReplacePeer : public Command {
+class CmdReplacePeer : public Command {
public:
- virtual bool slaveOk() { return true; }
- virtual bool adminOnly() { return true; }
- virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() {
+ return true;
+ }
+ virtual bool adminOnly() {
+ return true;
+ }
+ virtual bool logTheOp() {
+ return false;
+ }
CmdReplacePeer() : Command("replacepeer") { }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if( replPair == 0 ) {
+ if ( replPair == 0 ) {
errmsg = "not paired";
return false;
- }
- if( !pairSync.initialSyncCompleted() ) {
- errmsg = "not caught up cannot replace peer";
+ }
+ if ( !pairSync.initialSyncCompleted() ) {
+ errmsg = "not caught up cannot replace peer";
return false;
}
- if( syncing < 0 ) {
+ if ( syncing < 0 ) {
errmsg = "replacepeer already invoked";
return false;
}
Timer t;
- while( 1 ) {
- if( syncing == 0 || t.millis() > 20000 )
+ while ( 1 ) {
+ if ( syncing == 0 || t.millis() > 20000 )
break;
{
dbtemprelease t;
sleepmillis(10);
}
}
- if( syncing ) {
+ if ( syncing ) {
assert( syncing > 0 );
errmsg = "timeout waiting for sync() to finish";
return false;
@@ -195,7 +209,7 @@ public:
{
vector<ReplSource*> sources;
ReplSource::loadAll(sources);
- if( sources.size() != 1 ) {
+ if ( sources.size() != 1 ) {
errmsg = "local.sources.count() != 1, cannot replace peer";
return false;
}
@@ -212,24 +226,26 @@ public:
}
} cmdReplacePeer;
-class CmdIsMaster : public Command {
+class CmdIsMaster : public Command {
public:
- virtual bool slaveOk() { return true; }
+ virtual bool slaveOk() {
+ return true;
+ }
CmdIsMaster() : Command("ismaster") { }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
- if( allDead ) {
+ if ( allDead ) {
result.append("ismaster", 0.0);
- if( replPair )
+ if ( replPair )
result.append("remote", replPair->remote);
result.append("info", allDead);
}
- else if( replPair ) {
+ else if ( replPair ) {
result.append("ismaster", replPair->state);
result.append("remote", replPair->remote);
- if( replPair->info.empty() )
+ if ( replPair->info.empty() )
result.append("info", replPair->info);
}
- else {
+ else {
result.append("ismaster", 1);
result.append("msg", "not paired");
}
@@ -249,20 +265,24 @@ public:
1,!1 -> 1,0
-1,-1 -> dominant->1, nondom->0
0,0 -> dominant->1, nondom->0
- 1,1 -> dominant->1, nondom->0
-
+ 1,1 -> dominant->1, nondom->0
+
{ negotiatemaster:1, i_was:<state>, your_name:<hostname> }
returns:
- { ok:1, you_are:..., i_am:... }
+ { ok:1, you_are:..., i_am:... }
*/
-class CmdNegotiateMaster : public Command {
+class CmdNegotiateMaster : public Command {
public:
CmdNegotiateMaster() : Command("negotiatemaster") { }
- virtual bool slaveOk() { return true; }
- virtual bool adminOnly() { return true; }
+ virtual bool slaveOk() {
+ return true;
+ }
+ virtual bool adminOnly() {
+ return true;
+ }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- if( replPair == 0 ) {
+ if ( replPair == 0 ) {
problem() << "got negotiatemaster cmd but we are not in paired mode." << endl;
errmsg = "not paired";
return false;
@@ -270,29 +290,31 @@ public:
int was = cmdObj.getIntField("i_was");
string myname = cmdObj.getStringField("your_name");
- if( myname.empty() || was < -1 ) {
+ if ( myname.empty() || was < -1 ) {
errmsg = "your_name/i_was not specified";
return false;
}
- int N = ReplPair::State_Negotiating;
- int M = ReplPair::State_Master;
- int S = ReplPair::State_Slave;
-
- if( !replPair->dominant( myname ) ) {
- result.append( "you_are", N );
- result.append( "i_am", N );
- return true;
- }
-
- int me, you;
- if( !pairSync.initialSyncCompleted() || ( replPair->state != M && was == M ) ) {
- me=S;you=M;
- }
- else {
- me=M;you=S;
- }
- replPair->setMaster( me, "CmdNegotiateMaster::run()" );
+ int N = ReplPair::State_Negotiating;
+ int M = ReplPair::State_Master;
+ int S = ReplPair::State_Slave;
+
+ if ( !replPair->dominant( myname ) ) {
+ result.append( "you_are", N );
+ result.append( "i_am", N );
+ return true;
+ }
+
+ int me, you;
+ if ( !pairSync.initialSyncCompleted() || ( replPair->state != M && was == M ) ) {
+ me=S;
+ you=M;
+ }
+ else {
+ me=M;
+ you=S;
+ }
+ replPair->setMaster( me, "CmdNegotiateMaster::run()" );
result.append("you_are", you);
result.append("i_am", me);
@@ -301,139 +323,139 @@ public:
}
} cmdnegotiatemaster;
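
The decision table in the comment above the class (Negotiating = -1, Slave = 0, Master = 1) reduces to: only the dominant node assigns roles, and it yields mastership if it has not finished its initial sync or the peer already was master. A standalone sketch of that rule as implemented in CmdNegotiateMaster::run() (function and enum names here are illustrative, not the real ReplPair API):

    #include <iostream>
    #include <utility>

    enum State { Negotiating = -1, Slave = 0, Master = 1 };  // values match the table above

    // Returns (i_am, you_are) from the receiving node's point of view.
    static std::pair<State, State> negotiateMaster(bool iAmDominant, bool initialSyncCompleted,
                                                   State myState, State peerWas) {
        if ( !iAmDominant )
            return std::make_pair(Negotiating, Negotiating);  // let the dominant side decide
        if ( !initialSyncCompleted || (myState != Master && peerWas == Master) )
            return std::make_pair(Slave, Master);             // peer keeps or takes mastership
        return std::make_pair(Master, Slave);
    }

    int main() {
        std::pair<State, State> r = negotiateMaster(true, true, Negotiating, Negotiating);
        std::cout << "i_am=" << r.first << " you_are=" << r.second << '\n';  // i_am=1 you_are=0
        return 0;
    }
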
-void ReplPair::negotiate(DBClientConnection *conn) {
+void ReplPair::negotiate(DBClientConnection *conn) {
BSONObjBuilder b;
b.append("negotiatemaster",1);
b.append("i_was", state);
b.append("your_name", remoteHost);
BSONObj cmd = b.done();
BSONObj res = conn->findOne("admin.$cmd", cmd);
- if( res.getIntField("ok") != 1 ) {
+ if ( res.getIntField("ok") != 1 ) {
problem() << "negotiate fails: " << res.toString() << '\n';
setMasterLocked(State_Confused);
return;
}
int x = res.getIntField("you_are");
- // State_Negotiating means the remote node is not dominant and cannot
- // choose who is master.
- if( x != State_Slave && x != State_Master && x != State_Negotiating ) {
+ // State_Negotiating means the remote node is not dominant and cannot
+ // choose who is master.
+ if ( x != State_Slave && x != State_Master && x != State_Negotiating ) {
problem() << "negotiate: bad you_are value " << res.toString() << endl;
return;
}
- if( x != State_Negotiating ) {
- // Don't actually have to lock here, since we only get here if not the
- // dominant node.
- setMaster(x);
- }
+ if ( x != State_Negotiating ) {
+ // Don't actually have to lock here, since we only get here if not the
+ // dominant node.
+ setMaster(x);
+ }
}
OpTime last(0, 0);
-OpTime OpTime::now() {
- unsigned t = (unsigned) time(0);
- if( last.secs == t ) {
- last.i++;
- return last;
- }
- last = OpTime(t, 1);
- return last;
+OpTime OpTime::now() {
+ unsigned t = (unsigned) time(0);
+ if ( last.secs == t ) {
+ last.i++;
+ return last;
+ }
+ last = OpTime(t, 1);
+ return last;
}
-struct TestOpTime {
- TestOpTime() {
- OpTime t;
- for( int i = 0; i < 10; i++ ) {
- OpTime s = OpTime::now();
- assert( s != t );
- t = s;
- }
- OpTime q = t;
- assert( q == t );
- assert( !(q != t) );
- }
+struct TestOpTime {
+ TestOpTime() {
+ OpTime t;
+ for ( int i = 0; i < 10; i++ ) {
+ OpTime s = OpTime::now();
+ assert( s != t );
+ t = s;
+ }
+ OpTime q = t;
+ assert( q == t );
+ assert( !(q != t) );
+ }
} testoptime;
-int test2() {
- return 0;
+int test2() {
+ return 0;
}
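
OpTime::now() above is the whole trick behind oplog ordering: a (seconds, increment) pair where the increment disambiguates operations falling in the same wall-clock second, so consecutive timestamps are always distinct and increasing. A standalone sketch of the same scheme (std::pair stands in for OpTime; like the original it keeps the last value in a single shared variable and is not thread-safe on its own):

    #include <cassert>
    #include <ctime>
    #include <utility>

    typedef std::pair<unsigned, unsigned> OpTimeSketch;   // (seconds, increment)

    static OpTimeSketch g_last(0, 0);

    // Same scheme as OpTime::now(): within one second, bump the increment; on a
    // new second, restart the increment at 1.
    static OpTimeSketch opTimeNow() {
        unsigned t = (unsigned) time(0);
        if ( g_last.first == t ) {
            g_last.second++;
            return g_last;
        }
        g_last = OpTimeSketch(t, 1);
        return g_last;
    }

    int main() {
        OpTimeSketch prev = opTimeNow();
        for ( int i = 0; i < 10; i++ ) {      // mirrors the TestOpTime check above
            OpTimeSketch cur = opTimeNow();
            assert( cur != prev );
            prev = cur;
        }
        return 0;
    }
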
/* --------------------------------------------------------------*/
ReplSource::ReplSource() {
replacing = false;
- nClonedThisPass = 0;
- paired = false;
+ nClonedThisPass = 0;
+ paired = false;
}
ReplSource::ReplSource(BSONObj o) : nClonedThisPass(0) {
replacing = false;
- paired = false;
- only = o.getStringField("only");
- hostName = o.getStringField("host");
- _sourceName = o.getStringField("source");
- uassert( "'host' field not set in sources collection object", !hostName.empty() );
+ paired = false;
+ only = o.getStringField("only");
+ hostName = o.getStringField("host");
+ _sourceName = o.getStringField("source");
+ uassert( "'host' field not set in sources collection object", !hostName.empty() );
uassert( "only source='main' allowed for now with replication", sourceName() == "main" );
- BSONElement e = o.getField("syncedTo");
- if( !e.eoo() ) {
- uassert( "bad sources 'syncedTo' field value", e.type() == Date );
- OpTime tmp( e.date() );
- syncedTo = tmp;
- //syncedTo.asDate() = e.date();
- }
-
- BSONObj dbsObj = o.getObjectField("dbs");
- if( !dbsObj.isEmpty() ) {
- BSONObjIterator i(dbsObj);
- while( 1 ) {
- BSONElement e = i.next();
- if( e.eoo() )
- break;
- dbs.insert( e.fieldName() );
- }
- }
+ BSONElement e = o.getField("syncedTo");
+ if ( !e.eoo() ) {
+ uassert( "bad sources 'syncedTo' field value", e.type() == Date );
+ OpTime tmp( e.date() );
+ syncedTo = tmp;
+ //syncedTo.asDate() = e.date();
+ }
+
+ BSONObj dbsObj = o.getObjectField("dbs");
+ if ( !dbsObj.isEmpty() ) {
+ BSONObjIterator i(dbsObj);
+ while ( 1 ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ dbs.insert( e.fieldName() );
+ }
+ }
}
/* Turn our C++ Source object into a BSONObj */
BSONObj ReplSource::jsobj() {
- BSONObjBuilder b;
- b.append("host", hostName);
- b.append("source", sourceName());
- if( !only.empty() )
- b.append("only", only);
- if( !syncedTo.isNull() )
+ BSONObjBuilder b;
+ b.append("host", hostName);
+ b.append("source", sourceName());
+ if ( !only.empty() )
+ b.append("only", only);
+ if ( !syncedTo.isNull() )
b.appendDate("syncedTo", syncedTo.asDate());
- BSONObjBuilder dbs_builder;
+ BSONObjBuilder dbs_builder;
int n = 0;
- for( set<string>::iterator i = dbs.begin(); i != dbs.end(); i++ ) {
+ for ( set<string>::iterator i = dbs.begin(); i != dbs.end(); i++ ) {
n++;
- dbs_builder.appendBool(i->c_str(), 1);
- }
- if( n )
+ dbs_builder.appendBool(i->c_str(), 1);
+ }
+ if ( n )
b.append("dbs", dbs_builder.done());
- return b.doneAndDecouple();
+ return b.doneAndDecouple();
}
-void ReplSource::save() {
- BSONObjBuilder b;
+void ReplSource::save() {
+ BSONObjBuilder b;
assert( !hostName.empty() );
- b.append("host", hostName);
+ b.append("host", hostName);
// todo: finish allowing multiple source configs.
// this line doesn't work right when source is null, if that is allowed as it is now:
//b.append("source", _sourceName);
- BSONObj pattern = b.done();
+ BSONObj pattern = b.done();
- BSONObj o = jsobj();
+ BSONObj o = jsobj();
- stringstream ss;
- setClient("local.sources");
- int u = _updateObjects("local.sources", o, pattern, true/*upsert for pair feature*/, ss);
- assert( u == 1 || u == 4 );
- database = 0;
+ stringstream ss;
+ setClient("local.sources");
+ int u = _updateObjects("local.sources", o, pattern, true/*upsert for pair feature*/, ss);
+ assert( u == 1 || u == 4 );
+ database = 0;
- if( replacing ) {
- /* if we were in "replace" mode, we now have synced up with the replacement,
+ if ( replacing ) {
+ /* if we were in "replace" mode, we now have synced up with the replacement,
so turn that off.
*/
replacing = false;
@@ -443,46 +465,46 @@ void ReplSource::save() {
}
}
-void ReplSource::cleanup(vector<ReplSource*>& v) {
- for( vector<ReplSource*>::iterator i = v.begin(); i != v.end(); i++ )
- delete *i;
+void ReplSource::cleanup(vector<ReplSource*>& v) {
+ for ( vector<ReplSource*>::iterator i = v.begin(); i != v.end(); i++ )
+ delete *i;
}
string dashDashSource;
-static void addSourceToList(vector<ReplSource*>&v, ReplSource& s, vector<ReplSource*>&old) {
- for( vector<ReplSource*>::iterator i = old.begin(); i != old.end(); ) {
- if( s == **i ) {
- v.push_back(*i);
- old.erase(i);
- return;
- }
- i++;
- }
-
- v.push_back( new ReplSource(s) );
+static void addSourceToList(vector<ReplSource*>&v, ReplSource& s, vector<ReplSource*>&old) {
+ for ( vector<ReplSource*>::iterator i = old.begin(); i != old.end(); ) {
+ if ( s == **i ) {
+ v.push_back(*i);
+ old.erase(i);
+ return;
+ }
+ i++;
+ }
+
+ v.push_back( new ReplSource(s) );
}
-/* we reuse our existing objects so that we can keep our existing connection
- and cursor in effect.
+/* we reuse our existing objects so that we can keep our existing connection
+ and cursor in effect.
*/
-void ReplSource::loadAll(vector<ReplSource*>& v) {
- vector<ReplSource *> old = v;
+void ReplSource::loadAll(vector<ReplSource*>& v) {
+ vector<ReplSource *> old = v;
v.erase(v.begin(), v.end());
- bool gotPairWith = false;
+ bool gotPairWith = false;
- if( !dashDashSource.empty() ) {
+ if ( !dashDashSource.empty() ) {
setClient("local.sources");
// --source <host> specified.
// check that no items are in sources other than that
// add if missing
auto_ptr<Cursor> c = findTableScan("local.sources", emptyObj);
int n = 0;
- while( c->ok() ) {
+ while ( c->ok() ) {
n++;
ReplSource tmp(c->current());
- if( tmp.hostName != dashDashSource ) {
+ if ( tmp.hostName != dashDashSource ) {
problem() << "--source " << dashDashSource << " != " << tmp.hostName << " from local.sources collection" << endl;
log() << "terminating after 30 seconds" << endl;
sleepsecs(30);
@@ -491,7 +513,7 @@ void ReplSource::loadAll(vector<ReplSource*>& v) {
c->advance();
}
uassert( "local.sources collection corrupt?", n<2 );
- if( n == 0 ) {
+ if ( n == 0 ) {
// source missing. add.
ReplSource s;
s.hostName = dashDashSource;
@@ -499,290 +521,290 @@ void ReplSource::loadAll(vector<ReplSource*>& v) {
}
}
- setClient("local.sources");
- auto_ptr<Cursor> c = findTableScan("local.sources", emptyObj);
- while( c->ok() ) {
- ReplSource tmp(c->current());
- if( replPair && tmp.hostName == replPair->remote && tmp.sourceName() == "main" ) {
- gotPairWith = true;
+ setClient("local.sources");
+ auto_ptr<Cursor> c = findTableScan("local.sources", emptyObj);
+ while ( c->ok() ) {
+ ReplSource tmp(c->current());
+ if ( replPair && tmp.hostName == replPair->remote && tmp.sourceName() == "main" ) {
+ gotPairWith = true;
tmp.paired = true;
- if( replacePeer ) {
+ if ( replacePeer ) {
// peer was replaced -- start back at the beginning.
tmp.syncedTo = OpTime();
tmp.replacing = true;
}
}
- addSourceToList(v, tmp, old);
- c->advance();
- }
- database = 0;
-
- if( !gotPairWith && replPair ) {
- /* add the --pairwith server */
- ReplSource *s = new ReplSource();
- s->paired = true;
- s->hostName = replPair->remote;
+ addSourceToList(v, tmp, old);
+ c->advance();
+ }
+ database = 0;
+
+ if ( !gotPairWith && replPair ) {
+ /* add the --pairwith server */
+ ReplSource *s = new ReplSource();
+ s->paired = true;
+ s->hostName = replPair->remote;
s->replacing = replacePeer;
- v.push_back(s);
- }
+ v.push_back(s);
+ }
- for( vector<ReplSource*>::iterator i = old.begin(); i != old.end(); i++ )
+ for ( vector<ReplSource*>::iterator i = old.begin(); i != old.end(); i++ )
delete *i;
}
BSONObj opTimeQuery = fromjson("{getoptime:1}");
bool ReplSource::resync(string db) {
- {
- log() << "resync: dropping database " << db << endl;
- string dummyns = db + ".";
- assert( database->name == db );
- dropDatabase(dummyns.c_str());
- setClientTempNs(dummyns.c_str());
- }
-
- {
- log() << "resync: cloning database " << db << endl;
- ReplInfo r("resync: cloning a database");
- string errmsg;
- bool ok = cloneFrom(hostName.c_str(), errmsg, database->name, false, /*slaveok*/ true);
- if( !ok ) {
- problem() << "resync of " << db << " from " << hostName << " failed " << errmsg << endl;
- throw SyncException();
- }
- }
-
- log() << "resync: done " << db << endl;
-
- /* add the db to our dbs array which we will write back to local.sources.
- note we are not in a consistent state until the oplog gets applied,
- which happens next when this returns.
- */
- dbs.insert(db);
- return true;
+ {
+ log() << "resync: dropping database " << db << endl;
+ string dummyns = db + ".";
+ assert( database->name == db );
+ dropDatabase(dummyns.c_str());
+ setClientTempNs(dummyns.c_str());
+ }
+
+ {
+ log() << "resync: cloning database " << db << endl;
+ ReplInfo r("resync: cloning a database");
+ string errmsg;
+ bool ok = cloneFrom(hostName.c_str(), errmsg, database->name, false, /*slaveok*/ true);
+ if ( !ok ) {
+ problem() << "resync of " << db << " from " << hostName << " failed " << errmsg << endl;
+ throw SyncException();
+ }
+ }
+
+ log() << "resync: done " << db << endl;
+
+ /* add the db to our dbs array which we will write back to local.sources.
+ note we are not in a consistent state until the oplog gets applied,
+ which happens next when this returns.
+ */
+ dbs.insert(db);
+ return true;
}
/* local.$oplog.main is of the form:
- { ts: ..., op: <optype>, ns: ..., o: <obj> , o2: <extraobj>, b: <boolflag> }
+ { ts: ..., op: <optype>, ns: ..., o: <obj> , o2: <extraobj>, b: <boolflag> }
...
see logOp() comments.
*/
void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op) {
- char clientName[MaxClientLen];
- const char *ns = op.getStringField("ns");
- nsToClient(ns, clientName);
+ char clientName[MaxClientLen];
+ const char *ns = op.getStringField("ns");
+ nsToClient(ns, clientName);
- if( *ns == '.' ) {
+ if ( *ns == '.' ) {
problem() << "skipping bad op in oplog: " << op.toString() << endl;
return;
}
- else if( *ns == 0 ) {
+ else if ( *ns == 0 ) {
problem() << "halting replication, bad op in oplog:\n " << op.toString() << endl;
allDead = "bad object in oplog";
throw SyncException();
}
- if( !only.empty() && only != clientName )
- return;
+ if ( !only.empty() && only != clientName )
+ return;
- bool newDb = dbs.count(clientName) == 0;
- if( newDb && nClonedThisPass ) {
- /* we only clone one database per pass, even if a lot need done. This helps us
- avoid overflowing the master's transaction log by doing too much work before going
- back to read more transactions. (Imagine a scenario of slave startup where we try to
- clone 100 databases in one pass.)
- */
- addDbNextPass.insert(clientName);
- return;
- }
+ bool newDb = dbs.count(clientName) == 0;
+ if ( newDb && nClonedThisPass ) {
+ /* we only clone one database per pass, even if a lot need done. This helps us
+ avoid overflowing the master's transaction log by doing too much work before going
+ back to read more transactions. (Imagine a scenario of slave startup where we try to
+ clone 100 databases in one pass.)
+ */
+ addDbNextPass.insert(clientName);
+ return;
+ }
- dblock lk;
- bool justCreated = setClientTempNs(ns);
- if( allDead ) {
+ dblock lk;
+ bool justCreated = setClientTempNs(ns);
+ if ( allDead ) {
// hmmm why is this check here and not at top of this function? does it get set between top and here?
log() << "allDead, throwing SyncException\n";
- throw SyncException();
+ throw SyncException();
}
// operation type -- see logOp() comments for types
- const char *opType = op.getStringField("op");
-
- if( justCreated || /* datafiles were missing. so we need everything, no matter what sources object says */
- newDb ) /* if not in dbs, we've never synced this database before, so we need everything */
- {
- if( op.getBoolField("first") &&
- pairSync.initialSyncCompleted() /*<- when false, we are a replacement volume for a pair and need a full sync */
- ) {
+ const char *opType = op.getStringField("op");
+
+ if ( justCreated || /* datafiles were missing. so we need everything, no matter what sources object says */
+ newDb ) /* if not in dbs, we've never synced this database before, so we need everything */
+ {
+ if ( op.getBoolField("first") &&
+ pairSync.initialSyncCompleted() /*<- when false, we are a replacement volume for a pair and need a full sync */
+ ) {
log() << "pull: got {first:true} op ns:" << ns << '\n';
/* this is the first thing in the oplog ever, so we don't need to resync(). */
- if( newDb )
+ if ( newDb )
dbs.insert(clientName);
- else
+ else
problem() << "warning: justCreated && !newDb in repl " << op.toString() << endl;
}
- else if( paired && !justCreated ) {
- if( strcmp(opType,"db") == 0 && strcmp(ns, "admin.") == 0 ) {
- // "admin" is a special namespace we use for priviledged commands -- ok if it exists first on
+ else if ( paired && !justCreated ) {
+ if ( strcmp(opType,"db") == 0 && strcmp(ns, "admin.") == 0 ) {
+                // "admin" is a special namespace we use for privileged commands -- ok if it exists first on
// either side
}
else {
- /* the other half of our pair has some operations. yet we already had a db on our
+ /* the other half of our pair has some operations. yet we already had a db on our
disk even though the db in question is not listed in the source.
*/
allDead = "pair: historical image missing for a db";
problem() << "pair: historical image missing for " << clientName << ", setting allDead=true" << endl;
log() << "op:" << op.toString() << endl;
/*
- log() << "TEMP: pair: assuming we have the historical image for: " <<
+ log() << "TEMP: pair: assuming we have the historical image for: " <<
clientName << ". add extra checks here." << endl;
dbs.insert(clientName);
*/
}
- }
- else {
- nClonedThisPass++;
- resync(database->name);
- }
+ }
+ else {
+ nClonedThisPass++;
+ resync(database->name);
+ }
addDbNextPass.erase(clientName);
- }
-
- stringstream ss;
- BSONObj o = op.getObjectField("o");
- try {
- if( *opType == 'i' ) {
- const char *p = strchr(ns, '.');
- if( p && strcmp(p, ".system.indexes") == 0 ) {
- // updates aren't allowed for indexes -- so we will do a regular insert. if index already
- // exists, that is ok.
- theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
- }
- else {
- // do upserts for inserts as we might get replayed more than once
- OID *oid = o.getOID();
- if( oid == 0 ) {
- _updateObjects(ns, o, o, true, ss);
- }
- else {
- BSONObjBuilder b;
- b.appendOID("_id", oid);
- RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
- _updateObjects(ns, o, b.done(), true, ss);
- }
- }
- }
- else if( *opType == 'u' ) {
- RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
- _updateObjects(ns, o, op.getObjectField("o2"), op.getBoolField("b"), ss);
- }
- else if( *opType == 'd' ) {
- if( opType[1] == 0 )
- deleteObjects(ns, o, op.getBoolField("b"));
- else
- assert( opType[1] == 'b' ); // "db" advertisement
- }
- else {
- BufBuilder bb;
- BSONObjBuilder ob;
- assert( *opType == 'c' );
- _runCommands(ns, o, ss, bb, ob, true);
- }
- }
- catch( UserAssertionException& e ) {
- log() << "sync: caught user assertion " << e.msg << '\n';
- }
- database = 0;
+ }
+
+ stringstream ss;
+ BSONObj o = op.getObjectField("o");
+ try {
+ if ( *opType == 'i' ) {
+ const char *p = strchr(ns, '.');
+ if ( p && strcmp(p, ".system.indexes") == 0 ) {
+ // updates aren't allowed for indexes -- so we will do a regular insert. if index already
+ // exists, that is ok.
+ theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
+ }
+ else {
+ // do upserts for inserts as we might get replayed more than once
+ OID *oid = o.getOID();
+ if ( oid == 0 ) {
+ _updateObjects(ns, o, o, true, ss);
+ }
+ else {
+ BSONObjBuilder b;
+ b.appendOID("_id", oid);
+ RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
+ _updateObjects(ns, o, b.done(), true, ss);
+ }
+ }
+ }
+ else if ( *opType == 'u' ) {
+ RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
+ _updateObjects(ns, o, op.getObjectField("o2"), op.getBoolField("b"), ss);
+ }
+ else if ( *opType == 'd' ) {
+ if ( opType[1] == 0 )
+ deleteObjects(ns, o, op.getBoolField("b"));
+ else
+ assert( opType[1] == 'b' ); // "db" advertisement
+ }
+ else {
+ BufBuilder bb;
+ BSONObjBuilder ob;
+ assert( *opType == 'c' );
+ _runCommands(ns, o, ss, bb, ob, true);
+ }
+ }
+ catch ( UserAssertionException& e ) {
+ log() << "sync: caught user assertion " << e.msg << '\n';
+ }
+ database = 0;
}
/* note: not yet in mutex at this point. */
-bool ReplSource::sync_pullOpLog() {
- string ns = string("local.oplog.$") + sourceName();
+bool ReplSource::sync_pullOpLog() {
+ string ns = string("local.oplog.$") + sourceName();
debugrepl( "sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() );
- bool tailing = true;
- DBClientCursor *c = cursor.get();
- if( c && c->isDead() ) {
- log() << "pull: old cursor isDead, initiating a new one\n";
- c = 0;
- }
-
- if( c == 0 ) {
- BSONObjBuilder q;
- q.appendDate("$gte", syncedTo.asDate());
- BSONObjBuilder query;
- query.append("ts", q.done());
+ bool tailing = true;
+ DBClientCursor *c = cursor.get();
+ if ( c && c->isDead() ) {
+ log() << "pull: old cursor isDead, initiating a new one\n";
+ c = 0;
+ }
+
+ if ( c == 0 ) {
+ BSONObjBuilder q;
+ q.appendDate("$gte", syncedTo.asDate());
+ BSONObjBuilder query;
+ query.append("ts", q.done());
BSONObj queryObj = query.done();
- // queryObj = { ts: { $gte: syncedTo } }
+ // queryObj = { ts: { $gte: syncedTo } }
debugrepl( ns << ".find(" << queryObj.toString() << ')' );
- cursor = conn->query( ns.c_str(), queryObj, 0, 0, 0, Option_CursorTailable | Option_SlaveOk );
- c = cursor.get();
- tailing = false;
- }
- else {
+ cursor = conn->query( ns.c_str(), queryObj, 0, 0, 0, Option_CursorTailable | Option_SlaveOk );
+ c = cursor.get();
+ tailing = false;
+ }
+ else {
debugrepl( "tailing=true" );
}
- if( c == 0 ) {
+ if ( c == 0 ) {
problem() << "pull: dbclient::query returns null (conn closed?)" << endl;
resetConnection();
sleepsecs(3);
return false;
}
- // show any deferred database creates from a previous pass
- {
- set<string>::iterator i = addDbNextPass.begin();
- if( i != addDbNextPass.end() ) {
- BSONObjBuilder b;
- b.append("ns", *i + '.');
- b.append("op", "db");
- BSONObj op = b.done();
- sync_pullOpLog_applyOperation(op);
- }
- }
-
- if( !c->more() ) {
- if( tailing ) {
+ // show any deferred database creates from a previous pass
+ {
+ set<string>::iterator i = addDbNextPass.begin();
+ if ( i != addDbNextPass.end() ) {
+ BSONObjBuilder b;
+ b.append("ns", *i + '.');
+ b.append("op", "db");
+ BSONObj op = b.done();
+ sync_pullOpLog_applyOperation(op);
+ }
+ }
+
+ if ( !c->more() ) {
+ if ( tailing ) {
debugrepl( "tailing & no new activity" );
} else
- log() << "pull: " << ns << " oplog is empty\n";
- sleepsecs(3);
- return true;
- }
+ log() << "pull: " << ns << " oplog is empty\n";
+ sleepsecs(3);
+ return true;
+ }
int n = 0;
- BSONObj op = c->next();
- BSONElement ts = op.findElement("ts");
- if( ts.type() != Date ) {
+ BSONObj op = c->next();
+ BSONElement ts = op.findElement("ts");
+ if ( ts.type() != Date ) {
string err = op.getStringField("$err");
- if( !err.empty() ) {
+ if ( !err.empty() ) {
problem() << "pull: $err reading remote oplog: " + err << '\n';
massert( "got $err reading remote oplog", false );
}
- else {
+ else {
problem() << "pull: bad object read from remote oplog: " << op.toString() << '\n';
massert("pull: bad object read from remote oplog", false);
}
}
- OpTime nextOpTime( ts.date() );
+ OpTime nextOpTime( ts.date() );
debugrepl( "first op time received: " << nextOpTime.toString() );
- bool initial = syncedTo.isNull();
- if( initial || tailing ) {
- if( tailing ) {
- assert( syncedTo < nextOpTime );
- }
- else {
- log() << "pull: initial run\n";
- }
+ bool initial = syncedTo.isNull();
+ if ( initial || tailing ) {
+ if ( tailing ) {
+ assert( syncedTo < nextOpTime );
+ }
+ else {
+ log() << "pull: initial run\n";
+ }
{
sync_pullOpLog_applyOperation(op);
n++;
}
- }
- else if( nextOpTime != syncedTo ) {
+ }
+ else if ( nextOpTime != syncedTo ) {
Logstream& l = log();
- l << "pull: nextOpTime " << nextOpTime.toStringLong() << ' ';
- if( nextOpTime < syncedTo )
+ l << "pull: nextOpTime " << nextOpTime.toStringLong() << ' ';
+ if ( nextOpTime < syncedTo )
l << "<??";
else
l << ">";
@@ -792,67 +814,67 @@ bool ReplSource::sync_pullOpLog() {
log() << "pull: tailing: " << tailing << '\n';
log() << "pull: data too stale, halting replication" << endl;
replInfo = allDead = "data too stale halted replication";
- assert( syncedTo < nextOpTime );
- throw SyncException();
- }
- else {
+ assert( syncedTo < nextOpTime );
+ throw SyncException();
+ }
+ else {
/* t == syncedTo, so the first op was applied previously, no need to redo it. */
}
- // apply operations
- {
- while( 1 ) {
- if( !c->more() ) {
- log() << "pull: applied " << n << " operations" << endl;
- syncedTo = nextOpTime;
+ // apply operations
+ {
+ while ( 1 ) {
+ if ( !c->more() ) {
+ log() << "pull: applied " << n << " operations" << endl;
+ syncedTo = nextOpTime;
debugrepl( "end sync_pullOpLog syncedTo: " << syncedTo.toStringLong() );
- dblock lk;
- save(); // note how far we are synced up to now
- break;
- }
- /* todo: get out of the mutex for the next()? */
- BSONObj op = c->next();
- ts = op.findElement("ts");
- assert( ts.type() == Date );
- OpTime last = nextOpTime;
- OpTime tmp( ts.date() );
- nextOpTime = tmp;
- if( !( last < nextOpTime ) ) {
- problem() << "sync error: last " << last.toString() << " >= nextOpTime " << nextOpTime.toString() << endl;
- uassert("bad 'ts' value in sources", false);
- }
-
- sync_pullOpLog_applyOperation(op);
- n++;
- }
- }
+ dblock lk;
+ save(); // note how far we are synced up to now
+ break;
+ }
+ /* todo: get out of the mutex for the next()? */
+ BSONObj op = c->next();
+ ts = op.findElement("ts");
+ assert( ts.type() == Date );
+ OpTime last = nextOpTime;
+ OpTime tmp( ts.date() );
+ nextOpTime = tmp;
+ if ( !( last < nextOpTime ) ) {
+ problem() << "sync error: last " << last.toString() << " >= nextOpTime " << nextOpTime.toString() << endl;
+ uassert("bad 'ts' value in sources", false);
+ }
+
+ sync_pullOpLog_applyOperation(op);
+ n++;
+ }
+ }
return true;
}
-/* note: not yet in mutex at this point.
+/* note: not yet in mutex at this point.
returns true if everything happy. return false if you want to reconnect.
*/
-bool ReplSource::sync() {
+bool ReplSource::sync() {
ReplInfo r("sync");
- if( !quiet )
+ if ( !quiet )
log() << "pull: " << sourceName() << '@' << hostName << endl;
- nClonedThisPass = 0;
+ nClonedThisPass = 0;
- if( (string("localhost") == hostName || string("127.0.0.1") == hostName) && port == DBPort ) {
+ if ( (string("localhost") == hostName || string("127.0.0.1") == hostName) && port == DBPort ) {
log() << "pull: can't sync from self (localhost). sources configuration may be wrong." << endl;
- sleepsecs(5);
+ sleepsecs(5);
return false;
}
- if( conn.get() == 0 ) {
- conn = auto_ptr<DBClientConnection>(new DBClientConnection());
- string errmsg;
+ if ( conn.get() == 0 ) {
+ conn = auto_ptr<DBClientConnection>(new DBClientConnection());
+ string errmsg;
ReplInfo r("trying to connect to sync source");
- if( !conn->connect(hostName.c_str(), errmsg) ) {
- resetConnection();
- log() << "pull: cantconn " << errmsg << endl;
- if( replPair && paired ) {
+ if ( !conn->connect(hostName.c_str(), errmsg) ) {
+ resetConnection();
+ log() << "pull: cantconn " << errmsg << endl;
+ if ( replPair && paired ) {
assert( startsWith(hostName.c_str(), replPair->remoteHost.c_str()) );
replPair->arbitrate();
}
@@ -860,27 +882,27 @@ bool ReplSource::sync() {
ReplInfo r("can't connect to sync source, sleeping");
sleepsecs(1);
}
- return false;
- }
- }
+ return false;
+ }
+ }
- if( paired )
+ if ( paired )
replPair->negotiate(conn.get());
-/*
- // get current mtime at the server.
- BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
- BSONElement e = o.findElement("optime");
- if( e.eoo() ) {
- log() << "pull: failed to get cur optime from master" << endl;
- log() << " " << o.toString() << endl;
- return false;
- }
- uassert( e.type() == Date );
- OpTime serverCurTime;
- serverCurTime.asDate() = e.date();
-*/
- return sync_pullOpLog();
+ /*
+ // get current mtime at the server.
+ BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
+ BSONElement e = o.findElement("optime");
+ if( e.eoo() ) {
+ log() << "pull: failed to get cur optime from master" << endl;
+ log() << " " << o.toString() << endl;
+ return false;
+ }
+ uassert( e.type() == Date );
+ OpTime serverCurTime;
+ serverCurTime.asDate() = e.date();
+ */
+ return sync_pullOpLog();
}
/* -- Logging of operations -------------------------------------*/
@@ -892,7 +914,7 @@ Database *localOplogClient = 0;
 /* we write to local.oplog.$main:
{ ts : ..., op: ..., ns: ..., o: ... }
ts: an OpTime timestamp
- op:
+ op:
"i" insert
"u" update
"d" delete
@@ -906,57 +928,57 @@ Database *localOplogClient = 0;
thus, the slave does not need to copy down all the data when it sees this.
*/
void _logOp(const char *opstr, const char *ns, BSONObj& obj, BSONObj *o2, bool *bb) {
- if( strncmp(ns, "local.", 6) == 0 )
- return;
+ if ( strncmp(ns, "local.", 6) == 0 )
+ return;
- Database *oldClient = database;
+ Database *oldClient = database;
bool haveLogged = database && database->haveLogged();
- /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
- instead we do a single copy to the destination position in the memory mapped file.
+ /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
+ instead we do a single copy to the destination position in the memory mapped file.
*/
- BSONObjBuilder b;
- b.appendDate("ts", OpTime::now().asDate());
- b.append("op", opstr);
- b.append("ns", ns);
- if( bb )
- b.appendBool("b", *bb);
- if( o2 )
- b.append("o2", *o2);
- if( !haveLogged ) {
+ BSONObjBuilder b;
+ b.appendDate("ts", OpTime::now().asDate());
+ b.append("op", opstr);
+ b.append("ns", ns);
+ if ( bb )
+ b.appendBool("b", *bb);
+ if ( o2 )
+ b.append("o2", *o2);
+ if ( !haveLogged ) {
b.appendBool("first", true);
- if( database ) // null on dropDatabase()'s logging.
+ if ( database ) // null on dropDatabase()'s logging.
database->setHaveLogged();
}
- BSONObj partial = b.done();
- int posz = partial.objsize();
- int len = posz + obj.objsize() + 1 + 2 /*o:*/;
-
- if( localOplogMainDetails == 0 ) {
- setClientTempNs("local.");
- localOplogClient = database;
- localOplogMainDetails = nsdetails("local.oplog.$main");
- }
- database = localOplogClient;
-
- Record *r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, "local.oplog.$main", len);
-
- char *p = r->data;
- memcpy(p, partial.objdata(), posz);
- *((unsigned *)p) += obj.objsize() + 1 + 2;
- p += posz - 1;
- *p++ = (char) Object;
- *p++ = 'o';
- *p++ = 0;
- memcpy(p, obj.objdata(), obj.objsize());
- p += obj.objsize();
- *p = EOO;
+ BSONObj partial = b.done();
+ int posz = partial.objsize();
+ int len = posz + obj.objsize() + 1 + 2 /*o:*/;
+
+ if ( localOplogMainDetails == 0 ) {
+ setClientTempNs("local.");
+ localOplogClient = database;
+ localOplogMainDetails = nsdetails("local.oplog.$main");
+ }
+ database = localOplogClient;
+
+ Record *r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, "local.oplog.$main", len);
+
+ char *p = r->data;
+ memcpy(p, partial.objdata(), posz);
+ *((unsigned *)p) += obj.objsize() + 1 + 2;
+ p += posz - 1;
+ *p++ = (char) Object;
+ *p++ = 'o';
+ *p++ = 0;
+ memcpy(p, obj.objdata(), obj.objsize());
+ p += obj.objsize();
+ *p = EOO;
//BSONObj temp(r);
//cout << "temp:" << temp.toString() << endl;
- database = oldClient;
+ database = oldClient;
}
/* --------------------------------------------------------------*/
@@ -968,14 +990,14 @@ _ reuse that cursor when we can
*/
/* returns: # of seconds to sleep before next pass */
-int _replMain(vector<ReplSource*>& sources) {
- {
+int _replMain(vector<ReplSource*>& sources) {
+ {
ReplInfo r("replMain load sources");
dblock lk;
ReplSource::loadAll(sources);
}
-
- if( sources.empty() ) {
+
+ if ( sources.empty() ) {
/* replication is not configured yet (for --slave) in local.sources. Poll for config it
every 20 seconds.
*/
@@ -983,62 +1005,62 @@ int _replMain(vector<ReplSource*>& sources) {
}
bool sleep = true;
- for( vector<ReplSource*>::iterator i = sources.begin(); i != sources.end(); i++ ) {
- ReplSource *s = *i;
- bool ok = false;
+ for ( vector<ReplSource*>::iterator i = sources.begin(); i != sources.end(); i++ ) {
+ ReplSource *s = *i;
+ bool ok = false;
try {
ok = s->sync();
bool moreToSync = s->haveMoreDbsToSync();
sleep = !moreToSync;
- if( ok && !moreToSync /*&& !s->syncedTo.isNull()*/ ) {
+ if ( ok && !moreToSync /*&& !s->syncedTo.isNull()*/ ) {
pairSync.setInitialSyncCompletedLocking();
}
}
- catch( SyncException& ) {
+ catch ( SyncException& ) {
log() << "caught SyncException, sleeping 10 secs" << endl;
return 10;
}
- catch( AssertionException& e ) {
- if( e.severe() ) {
+ catch ( AssertionException& e ) {
+ if ( e.severe() ) {
log() << "replMain caught AssertionException, sleeping 1 minutes" << endl;
return 60;
}
- else {
+ else {
log() << e.toString() << '\n';
}
replInfo = "replMain caught AssertionException";
}
- if( !ok )
+ if ( !ok )
s->resetConnection();
}
- if( sleep ) {
+ if ( sleep ) {
return 3;
}
return 0;
}
-void replMain() {
- vector<ReplSource*> sources;
- while( 1 ) {
+void replMain() {
+ vector<ReplSource*> sources;
+ while ( 1 ) {
int s = 0;
{
dblock lk;
- if( allDead )
+ if ( allDead )
break;
assert( syncing == 0 );
syncing++;
}
- try {
+ try {
s = _replMain(sources);
- } catch(...) {
+ } catch (...) {
cout << "TEMP: caught exception in _replMain" << endl;
- }
+ }
{
dblock lk;
assert( syncing == 1 );
syncing--;
}
- if( s ) {
+ if ( s ) {
stringstream ss;
ss << "replMain: sleep " << s << " before next pass";
string msg = ss.str();
@@ -1053,93 +1075,93 @@ void replMain() {
int debug_stop_repl = 0;
-void replSlaveThread() {
+void replSlaveThread() {
sleepsecs(1);
{
dblock lk;
BSONObj obj;
- if( getSingleton("local.pair.startup", obj) ) {
+ if ( getSingleton("local.pair.startup", obj) ) {
// should be: {replacepeer:1}
replacePeer = true;
pairSync.setInitialSyncCompleted(); // we are the half that has all the data
}
}
- while( 1 ) {
- try {
- replMain();
- if( debug_stop_repl )
- break;
- sleepsecs(5);
- }
- catch( AssertionException& ) {
- ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry");
- problem() << "Assertion in replSlaveThread(): sleeping 5 minutes before retry" << endl;
- sleepsecs(300);
- }
- }
+ while ( 1 ) {
+ try {
+ replMain();
+ if ( debug_stop_repl )
+ break;
+ sleepsecs(5);
+ }
+ catch ( AssertionException& ) {
+ ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry");
+ problem() << "Assertion in replSlaveThread(): sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
+ }
+ }
}
/* used to verify that slave knows what databases we have */
-void logOurDbsPresence() {
- path dbs(dbpath);
+void logOurDbsPresence() {
+ path dbs(dbpath);
directory_iterator end;
directory_iterator i(dbs);
- dblock lk;
-
- while( i != end ) {
- path p = *i;
- string f = p.leaf();
- if( endsWith(f.c_str(), ".ns") ) {
- /* note: we keep trailing "." so that when slave calls setClient(ns) everything is happy; e.g.,
- valid namespaces must always have a dot, even though here it is just a placeholder not
- a real one
- */
- string dbname = string(f.c_str(), f.size() - 2);
- if( dbname != "local." ) {
- setClientTempNs(dbname.c_str());
- logOp("db", dbname.c_str(), emptyObj);
- }
- }
- i++;
+ dblock lk;
+
+ while ( i != end ) {
+ path p = *i;
+ string f = p.leaf();
+ if ( endsWith(f.c_str(), ".ns") ) {
+ /* note: we keep trailing "." so that when slave calls setClient(ns) everything is happy; e.g.,
+ valid namespaces must always have a dot, even though here it is just a placeholder not
+ a real one
+ */
+ string dbname = string(f.c_str(), f.size() - 2);
+ if ( dbname != "local." ) {
+ setClientTempNs(dbname.c_str());
+ logOp("db", dbname.c_str(), emptyObj);
+ }
+ }
+ i++;
}
database = 0;
}
/* we have to log the db presence periodically as that "advertisement" will roll out of the log
- as it is of finite length. also as we only do one db cloning per pass, we could skip over a bunch of
- advertisements and thus need to see them again later. so this mechanism can actually be very slow to
+ as it is of finite length. also as we only do one db cloning per pass, we could skip over a bunch of
+ advertisements and thus need to see them again later. so this mechanism can actually be very slow to
work, and should be improved.
*/
-void replMasterThread() {
+void replMasterThread() {
sleepsecs(15);
logOurDbsPresence();
- // if you are testing, you might finish test and shutdown in less than 10
- // minutes yet not have done something in first 15 -- this is to exercise
+ // if you are testing, you might finish test and shutdown in less than 10
+ // minutes yet not have done something in first 15 -- this is to exercise
// this code some.
- sleepsecs(90);
- logOurDbsPresence();
+ sleepsecs(90);
+ logOurDbsPresence();
- while( 1 ) {
- logOurDbsPresence();
- sleepsecs(60 * 10);
- }
+ while ( 1 ) {
+ logOurDbsPresence();
+ sleepsecs(60 * 10);
+ }
}
-void tempThread() {
- while( 1 ) {
+void tempThread() {
+ while ( 1 ) {
cout << dbMutexInfo.isLocked() << endl;
sleepmillis(100);
}
}
-void startReplication() {
- /* this was just to see if anything locks for longer than it should -- we need to be careful
+void startReplication() {
+ /* this was just to see if anything locks for longer than it should -- we need to be careful
not to be locked when trying to connect() or query() the other side.
*/
//boost::thread tempt(tempThread);
@@ -1149,38 +1171,38 @@ void startReplication() {
pairSync.init();
}
- if( slave || replPair ) {
- if( slave && !quiet )
- log() << "slave=true" << endl;
- slave = true;
- boost::thread repl_thread(replSlaveThread);
- }
-
- if( master || replPair ) {
- if( master && !quiet )
- log() << "master=true" << endl;
- master = true;
- {
- dblock lk;
- /* create an oplog collection, if it doesn't yet exist. */
- BSONObjBuilder b;
+ if ( slave || replPair ) {
+ if ( slave && !quiet )
+ log() << "slave=true" << endl;
+ slave = true;
+ boost::thread repl_thread(replSlaveThread);
+ }
+
+ if ( master || replPair ) {
+ if ( master && !quiet )
+ log() << "master=true" << endl;
+ master = true;
+ {
+ dblock lk;
+ /* create an oplog collection, if it doesn't yet exist. */
+ BSONObjBuilder b;
double sz = 50.0 * 1000 * 1000;
- if( sizeof(int *) >= 8 )
+ if ( sizeof(int *) >= 8 )
sz = 990.0 * 1000 * 1000;
- b.append("size", sz);
- b.appendBool("capped", 1);
- setClientTempNs("local.oplog.$main");
- string err;
- BSONObj o = b.done();
- userCreateNS("local.oplog.$main", o, err, false);
- database = 0;
- }
-
- boost::thread mt(replMasterThread);
- }
+ b.append("size", sz);
+ b.appendBool("capped", 1);
+ setClientTempNs("local.oplog.$main");
+ string err;
+ BSONObj o = b.done();
+ userCreateNS("local.oplog.$main", o, err, false);
+ database = 0;
+ }
+
+ boost::thread mt(replMasterThread);
+ }
}
/* called from main at server startup */
void pairWith(const char *remoteEnd, const char *arb) {
- replPair = new ReplPair(remoteEnd, arb);
+ replPair = new ReplPair(remoteEnd, arb);
}
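
The repl.cpp hunks above are whitespace-only, but they pass over the heart of slave replication: each oplog entry is a BSON record of the form { ts, op, ns, o, [o2], [b] }, and sync_pullOpLog_applyOperation dispatches on the single-character op field. As a quick reference, below is a standalone sketch of that dispatch using plain std::string fields in place of BSON; the type and function names are hypothetical stand-ins rather than the server's own API, the namespace guard is simplified, and inserts are shown as upserts because the slave may replay an entry more than once.

    // apply_sketch.cpp -- simplified model of the opType dispatch in
    // ReplSource::sync_pullOpLog_applyOperation (hypothetical stand-in types).
    #include <iostream>
    #include <string>

    struct OplogEntry {      // stands in for the BSON { ts, op, ns, o, o2, b } record
        std::string op;      // "i", "u", "d", "db", "c"
        std::string ns;      // e.g. "test.foo"
        std::string o;       // object / modifier / command payload (opaque here)
    };

    void applyOperation(const OplogEntry& e) {
        // simplified version of the bad-namespace guards in the real function
        if (e.ns.empty() || e.ns[0] == '.' || e.op.empty()) {
            std::cerr << "skipping bad op, ns='" << e.ns << "'\n";
            return;
        }
        switch (e.op[0]) {
        case 'i':            // insert -- replayed as an upsert so reapplication is safe
            std::cout << "upsert into " << e.ns << ": " << e.o << '\n';
            break;
        case 'u':            // update -- o is the modifier, o2 (not modeled) the pattern
            std::cout << "update " << e.ns << ": " << e.o << '\n';
            break;
        case 'd':            // "d" = delete, "db" = database-presence advertisement
            if (e.op == "db")
                std::cout << "db advertisement for " << e.ns << '\n';
            else
                std::cout << "delete from " << e.ns << ": " << e.o << '\n';
            break;
        case 'c':            // command
            std::cout << "run command against " << e.ns << ": " << e.o << '\n';
            break;
        default:
            std::cerr << "unknown op type '" << e.op << "'\n";
        }
    }

    int main() {
        applyOperation({"i", "test.foo", "{ _id: 1, x: 2 }"});
        applyOperation({"db", "test.", ""});
    }
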
diff --git a/db/repl.h b/db/repl.h
index 74b01f96b61..f2baeb29a4a 100644
--- a/db/repl.h
+++ b/db/repl.h
@@ -2,23 +2,23 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* replication data overview
- at the slave:
+ at the slave:
local.sources { host: ..., source: ..., syncedTo: ..., dbs: { ... } }
at the master:
@@ -38,57 +38,71 @@ extern bool master;
bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication, bool slaveOk);
#pragma pack(push,4)
-class OpTime {
- unsigned i;
- unsigned secs;
+class OpTime {
+ unsigned i;
+ unsigned secs;
public:
- unsigned getSecs() const { return secs; }
- OpTime(unsigned long long date) {
- reinterpret_cast<unsigned long long&>(*this) = date;
- }
- OpTime(unsigned a, unsigned b) { secs = a; i = b; }
- OpTime() { secs = 0; i = 0; }
- static OpTime now();
-
- /* We store OpTime's in the database as Javascript Date datatype -- we needed some sort of
- 64 bit "container" for these values. While these are not really "Dates", that seems a
- better choice for now than say, Number, which is floating point. Note the BinData type
- is perhaps the cleanest choice, lacking a true unsigned64 datatype, but BinData has a
- couple bytes of overhead.
- */
- unsigned long long asDate() const { return *((unsigned long long *) &i); }
-// unsigned long long& asDate() { return *((unsigned long long *) &i); }
-
- bool isNull() { return secs == 0; }
-
- string toStringLong() const {
- char buf[64];
- time_t_to_String(secs, buf);
- stringstream ss;
- ss << buf << ' ';
- ss << hex << secs << ':' << i;
- return ss.str();
- }
-
- string toString() const {
- stringstream ss;
- ss << hex << secs << ':' << i;
- return ss.str();
- }
- bool operator==(const OpTime& r) const {
- return i == r.i && secs == r.secs;
- }
- bool operator!=(const OpTime& r) const { return !(*this == r); }
- bool operator<(const OpTime& r) const {
- if( secs != r.secs )
- return secs < r.secs;
- return i < r.i;
- }
+ unsigned getSecs() const {
+ return secs;
+ }
+ OpTime(unsigned long long date) {
+ reinterpret_cast<unsigned long long&>(*this) = date;
+ }
+ OpTime(unsigned a, unsigned b) {
+ secs = a;
+ i = b;
+ }
+ OpTime() {
+ secs = 0;
+ i = 0;
+ }
+ static OpTime now();
+
+ /* We store OpTime's in the database as Javascript Date datatype -- we needed some sort of
+ 64 bit "container" for these values. While these are not really "Dates", that seems a
+ better choice for now than say, Number, which is floating point. Note the BinData type
+ is perhaps the cleanest choice, lacking a true unsigned64 datatype, but BinData has a
+ couple bytes of overhead.
+ */
+ unsigned long long asDate() const {
+ return *((unsigned long long *) &i);
+ }
+// unsigned long long& asDate() { return *((unsigned long long *) &i); }
+
+ bool isNull() {
+ return secs == 0;
+ }
+
+ string toStringLong() const {
+ char buf[64];
+ time_t_to_String(secs, buf);
+ stringstream ss;
+ ss << buf << ' ';
+ ss << hex << secs << ':' << i;
+ return ss.str();
+ }
+
+ string toString() const {
+ stringstream ss;
+ ss << hex << secs << ':' << i;
+ return ss.str();
+ }
+ bool operator==(const OpTime& r) const {
+ return i == r.i && secs == r.secs;
+ }
+ bool operator!=(const OpTime& r) const {
+ return !(*this == r);
+ }
+ bool operator<(const OpTime& r) const {
+ if ( secs != r.secs )
+ return secs < r.secs;
+ return i < r.i;
+ }
};
#pragma pack(pop)
/* A replication exception */
-struct SyncException {
+struct SyncException {
};
/* A Source is a source from which we can pull (replicate) data.
@@ -98,58 +112,62 @@ struct SyncException {
{ host: ..., source: ..., syncedTo: ..., dbs: { ... } }
- 'source' defaults to 'main'; support for multiple source names is
+ 'source' defaults to 'main'; support for multiple source names is
not done (always use main for now).
*/
class ReplSource {
- bool resync(string db);
- bool sync_pullOpLog();
- void sync_pullOpLog_applyOperation(BSONObj& op);
+ bool resync(string db);
+ bool sync_pullOpLog();
+ void sync_pullOpLog_applyOperation(BSONObj& op);
- auto_ptr<DBClientConnection> conn;
- auto_ptr<DBClientCursor> cursor;
+ auto_ptr<DBClientConnection> conn;
+ auto_ptr<DBClientCursor> cursor;
- set<string> addDbNextPass;
+ set<string> addDbNextPass;
- ReplSource();
+ ReplSource();
public:
bool replacing; // in "replace mode" -- see CmdReplacePeer
- bool paired; // --pair in use
- string hostName; // ip addr or hostname plus optionally, ":<port>"
- string _sourceName; // a logical source name.
- string sourceName() const { return _sourceName.empty() ? "main" : _sourceName; }
- string only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
-
- /* the last time point we have already synced up to. */
- OpTime syncedTo;
-
- /* list of databases that we have synced.
- we need this so that if we encounter a new one, we know
- to go fetch the old data.
- */
- set<string> dbs;
-
- int nClonedThisPass;
-
- static void loadAll(vector<ReplSource*>&);
- static void cleanup(vector<ReplSource*>&);
- ReplSource(BSONObj);
- bool sync();
- void save(); // write ourself to local.sources
- void resetConnection() {
- conn = auto_ptr<DBClientConnection>(0);
+ bool paired; // --pair in use
+ string hostName; // ip addr or hostname plus optionally, ":<port>"
+ string _sourceName; // a logical source name.
+ string sourceName() const {
+ return _sourceName.empty() ? "main" : _sourceName;
+ }
+ string only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
+
+ /* the last time point we have already synced up to. */
+ OpTime syncedTo;
+
+ /* list of databases that we have synced.
+ we need this so that if we encounter a new one, we know
+ to go fetch the old data.
+ */
+ set<string> dbs;
+
+ int nClonedThisPass;
+
+ static void loadAll(vector<ReplSource*>&);
+ static void cleanup(vector<ReplSource*>&);
+ ReplSource(BSONObj);
+ bool sync();
+ void save(); // write ourself to local.sources
+ void resetConnection() {
+ conn = auto_ptr<DBClientConnection>(0);
cursor = auto_ptr<DBClientCursor>(0);
}
- // make a jsobj from our member fields of the form
- // { host: ..., source: ..., syncedTo: ... }
- BSONObj jsobj();
-
- bool operator==(const ReplSource&r) const {
- return hostName == r.hostName && sourceName() == r.sourceName();
- }
+ // make a jsobj from our member fields of the form
+ // { host: ..., source: ..., syncedTo: ... }
+ BSONObj jsobj();
+
+ bool operator==(const ReplSource&r) const {
+ return hostName == r.hostName && sourceName() == r.sourceName();
+ }
- bool haveMoreDbsToSync() const { return !addDbNextPass.empty(); }
+ bool haveMoreDbsToSync() const {
+ return !addDbNextPass.empty();
+ }
};
/* Write operation to the log (local.oplog.$main)
@@ -161,6 +179,6 @@ public:
*/
void _logOp(const char *opstr, const char *ns, BSONObj& obj, BSONObj *patt, bool *b);
inline void logOp(const char *opstr, const char *ns, BSONObj& obj, BSONObj *patt = 0, bool *b = 0) {
- if( master )
- _logOp(opstr, ns, obj, patt, b);
+ if ( master )
+ _logOp(opstr, ns, obj, patt, b);
}
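
The OpTime class reformatted above packs a per-second counter and a seconds value into a single 64-bit quantity that is persisted in BSON as a Date, and orders values by (secs, i), as its own comment explains. Below is a minimal standalone sketch of that packing, assuming a little-endian host and using memcpy rather than the cast in the original; the names are illustrative, not the server's.

    // optime_sketch.cpp -- standalone model of OpTime: counter + seconds packed
    // into one 64-bit value that round-trips through a BSON "Date".
    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    struct OpTimeSketch {
        // field order mirrors repl.h: the counter comes first, seconds second, so
        // on a little-endian machine secs lands in the high 32 bits of the packed
        // value and the 64-bit integer sorts the same way operator< does.
        uint32_t i;     // ordinal within the second
        uint32_t secs;  // unix time, seconds

        uint64_t asDate() const {            // memcpy instead of the original's cast
            uint64_t d;
            std::memcpy(&d, this, sizeof d);
            return d;
        }
        static OpTimeSketch fromDate(uint64_t d) {
            OpTimeSketch t;
            std::memcpy(&t, &d, sizeof t);
            return t;
        }
        bool operator<(const OpTimeSketch& r) const {
            return secs != r.secs ? secs < r.secs : i < r.i;
        }
    };
    static_assert(sizeof(OpTimeSketch) == 8, "expected two packed 32-bit words");

    int main() {
        OpTimeSketch a{2, 100}, b{0, 101};
        assert(a < b);                                        // ordered by secs, then i
        assert(OpTimeSketch::fromDate(a.asDate()).secs == 100); // survives the Date round-trip
        std::cout << std::hex << a.asDate() << '\n';
    }
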
diff --git a/db/replset.h b/db/replset.h
index 86162d32076..ec4faffc159 100644
--- a/db/replset.h
+++ b/db/replset.h
@@ -1,15 +1,15 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -29,13 +29,13 @@ extern const char *allDead;
You may read from the slave at anytime though (if you don't mind the slight lag).
- todo: Could be extended to be more than a pair, thus the name 'Set' -- for example,
+ todo: Could be extended to be more than a pair, thus the name 'Set' -- for example,
a set of 3...
*/
-class ReplPair {
+class ReplPair {
public:
- enum {
+ enum {
State_CantArb = -3,
State_Confused = -2,
State_Negotiating = -1,
@@ -46,16 +46,16 @@ public:
int state;
string info; // commentary about our current state
string arbHost; // "-" for no arbiter. "host[:port]"
- int remotePort;
- string remoteHost;
- string remote; // host:port if port specified.
+ int remotePort;
+ string remoteHost;
+ string remote; // host:port if port specified.
// int date; // -1 not yet set; 0=slave; 1=master
string getInfo() {
stringstream ss;
ss << " state: ";
- if( state == 1 ) ss << "1 State_Master ";
- else if( state == 0 ) ss << "0 State_Slave";
+ if ( state == 1 ) ss << "1 State_Master ";
+ else if ( state == 0 ) ss << "0 State_Slave";
else
ss << "<b>" << state << "</b>";
ss << '\n';
@@ -66,19 +66,19 @@ public:
return ss.str();
}
- ReplPair(const char *remoteEnd, const char *arbiter);
+ ReplPair(const char *remoteEnd, const char *arbiter);
- bool dominant(const string& myname) {
- if( myname == remoteHost )
+ bool dominant(const string& myname) {
+ if ( myname == remoteHost )
return port > remotePort;
return myname > remoteHost;
}
- void setMasterLocked( int n, const char *_comment = "" ) {
- dblock p;
- setMaster( n, _comment );
- }
-
+ void setMasterLocked( int n, const char *_comment = "" ) {
+ dblock p;
+ setMaster( n, _comment );
+ }
+
void setMaster(int n, const char *_comment = "");
/* negotiate with our peer who is master */
@@ -88,8 +88,8 @@ public:
void arbitrate();
virtual
- DBClientConnection *newClientConnection() const {
- return new DBClientConnection();
+ DBClientConnection *newClientConnection() const {
+ return new DBClientConnection();
}
};
@@ -97,18 +97,18 @@ extern ReplPair *replPair;
/* note we always return true for the "local" namespace.
- we should not allow most operations when not the master
+ we should not allow most operations when not the master
also we report not master if we are "dead".
See also CmdIsMaster.
*/
-inline bool isMaster() {
- if( allDead ) {
+inline bool isMaster() {
+ if ( allDead ) {
return database->name == "local";
}
- if( replPair == 0 || replPair->state == ReplPair::State_Master )
+ if ( replPair == 0 || replPair->state == ReplPair::State_Master )
return true;
return database->name == "local";
@@ -116,17 +116,17 @@ inline bool isMaster() {
inline ReplPair::ReplPair(const char *remoteEnd, const char *arb) {
state = -1;
- remote = remoteEnd;
- remotePort = DBPort;
- remoteHost = remoteEnd;
- const char *p = strchr(remoteEnd, ':');
- if( p ) {
- remoteHost = string(remoteEnd, p-remoteEnd);
- remotePort = atoi(p+1);
- uassert("bad port #", remotePort > 0 && remotePort < 0x10000 );
- if( remotePort == DBPort )
- remote = remoteHost; // don't include ":27017" as it is default; in case ran in diff ways over time to normalizke the hostname format in sources collection
- }
+ remote = remoteEnd;
+ remotePort = DBPort;
+ remoteHost = remoteEnd;
+ const char *p = strchr(remoteEnd, ':');
+ if ( p ) {
+ remoteHost = string(remoteEnd, p-remoteEnd);
+ remotePort = atoi(p+1);
+ uassert("bad port #", remotePort > 0 && remotePort < 0x10000 );
+ if ( remotePort == DBPort )
+            remote = remoteHost; // don't include ":27017" as it is default; in case ran in diff ways over time to normalize the hostname format in sources collection
+ }
uassert("arbiter parm is missing, use '-' for none", arb);
arbHost = arb;
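
For context on the inlined ReplPair constructor above: the --pairwith argument is parsed as host[:port], the port defaults when omitted, and a ":27017" suffix is dropped so the host string stays in one normalized form in local.sources; dominant() then breaks master-election ties by comparing host names, falling back to ports when both halves share a host. A rough standalone sketch of that logic follows; the helper names are hypothetical, and the default port of 27017 is an assumption taken from the comment in the diff rather than from the DBPort definition itself.

    // pair_parse_sketch.cpp -- simplified model of ReplPair's remote-end parsing
    // and the dominant() tie-break used during negotiation.
    #include <cassert>
    #include <cstdlib>
    #include <string>

    const int kDefaultPort = 27017;   // assumption: DBPort, per the ":27017" comment above

    struct PairEnd {
        std::string remoteHost;        // host only
        int remotePort = kDefaultPort;
        std::string remote;            // normalized host[:port]; port omitted when default
    };

    PairEnd parseRemoteEnd(const std::string& remoteEnd) {
        PairEnd p;
        p.remote = p.remoteHost = remoteEnd;
        std::string::size_type colon = remoteEnd.find(':');
        if (colon != std::string::npos) {
            p.remoteHost = remoteEnd.substr(0, colon);
            p.remotePort = std::atoi(remoteEnd.c_str() + colon + 1);
            assert(p.remotePort > 0 && p.remotePort < 0x10000);
            if (p.remotePort == kDefaultPort)
                p.remote = p.remoteHost;   // drop ":27017" so sources entries normalize
        }
        return p;
    }

    // higher host name wins; if both halves share a host, the higher port wins
    bool dominant(const PairEnd& p, const std::string& myName, int myPort) {
        if (myName == p.remoteHost)
            return myPort > p.remotePort;
        return myName > p.remoteHost;
    }

    int main() {
        PairEnd p = parseRemoteEnd("db2.example.com:27017");
        assert(p.remote == "db2.example.com" && p.remotePort == kDefaultPort);
        assert(dominant(p, "db3.example.com", 27017));
    }
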
diff --git a/db/resource.h b/db/resource.h
index 979c5302b4f..f22354edac9 100644
--- a/db/resource.h
+++ b/db/resource.h
@@ -4,22 +4,22 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Next default values for new objects
-//
+//
#ifdef APSTUDIO_INVOKED
#ifndef APSTUDIO_READONLY_SYMBOLS
#define _APS_NEXT_RESOURCE_VALUE 101
diff --git a/db/scanandorder.h b/db/scanandorder.h
index 6a1619bae6f..664f9265045 100644
--- a/db/scanandorder.h
+++ b/db/scanandorder.h
@@ -4,16 +4,16 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -29,17 +29,17 @@
class KeyType : boost::noncopyable {
public:
- BSONObj pattern; // e.g., { ts : -1 }
+ BSONObj pattern; // e.g., { ts : -1 }
public:
- KeyType(BSONObj _keyPattern) {
- pattern = _keyPattern;
- assert( !pattern.isEmpty() );
- }
-
- // returns the key value for o
- BSONObj getKeyFromObject(BSONObj o) {
- return o.extractFields(pattern);
- }
+ KeyType(BSONObj _keyPattern) {
+ pattern = _keyPattern;
+ assert( !pattern.isEmpty() );
+ }
+
+ // returns the key value for o
+ BSONObj getKeyFromObject(BSONObj o) {
+ return o.extractFields(pattern);
+ }
};
/* todo:
@@ -48,21 +48,21 @@ public:
_ response size limit from runquery; push it up a bit.
*/
-inline bool fillQueryResultFromObj(BufBuilder& b, set<string> *filter, BSONObj& js) {
- if( filter ) {
- BSONObj x;
- bool ok = x.addFields(js, *filter) > 0;
- if( ok )
- b.append((void*) x.objdata(), x.objsize());
- return ok;
- }
-
- b.append((void*) js.objdata(), js.objsize());
- return true;
+inline bool fillQueryResultFromObj(BufBuilder& b, set<string> *filter, BSONObj& js) {
+ if ( filter ) {
+ BSONObj x;
+ bool ok = x.addFields(js, *filter) > 0;
+ if ( ok )
+ b.append((void*) x.objdata(), x.objsize());
+ return ok;
+ }
+
+ b.append((void*) js.objdata(), js.objsize());
+ return true;
}
typedef multimap<BSONObj,BSONObj> BestMap;
-class ScanAndOrder {
+class ScanAndOrder {
BestMap best; // key -> full object
int startFrom;
int limit; // max to send back.
@@ -70,7 +70,7 @@ class ScanAndOrder {
int dir;
unsigned approxSize;
- void _add(BSONObj& k, BSONObj o) {
+ void _add(BSONObj& k, BSONObj o) {
best.insert(make_pair(k,o));
}
@@ -78,7 +78,7 @@ class ScanAndOrder {
void _addIfBetter(BSONObj& k, BSONObj o, BestMap::iterator i) {
const BSONObj& worstBestKey = i->first;
int c = worstBestKey.woCompare(k);
- if( (c<0 && dir<0) || (c>0&&dir>0) ) {
+ if ( (c<0 && dir<0) || (c>0&&dir>0) ) {
// k is better, 'upgrade'
best.erase(i);
_add(k, o);
@@ -86,69 +86,72 @@ class ScanAndOrder {
}
public:
- ScanAndOrder(int _startFrom, int _limit, BSONObj _order) :
- startFrom(_startFrom), order(_order) {
- limit = _limit > 0 ? _limit + startFrom : 0x7fffffff;
- approxSize = 0;
-
- // todo: do order right for compound keys. this is temp.
- dir = 1;
- BSONElement e = order.pattern.firstElement();
- if( e.number() < 0 ) {
- dir = -1;
- }
- }
-
- int size() const { return best.size(); }
-
- void add(BSONObj o) {
- BSONObj k = order.getKeyFromObject(o);
- if( (int) best.size() < limit ) {
- approxSize += k.objsize();
- uassert( "too much key data for sort() with no index", approxSize < 1 * 1024 * 1024 );
- _add(k, o);
- return;
- }
- BestMap::iterator i;
- if( dir < 0 )
- i = best.begin();
- else {
- assert( best.end() != best.begin() );
- i = best.end(); i--;
- }
- _addIfBetter(k, o, i);
- }
-
- template<class T>
- void _fill(BufBuilder& b, set<string> *filter, int& nout, T begin, T end) {
- int n = 0;
- int nFilled = 0;
- for( T i = begin; i != end; i++ ) {
- n++;
- if( n <= startFrom )
- continue;
- BSONObj& o = i->second;
- if( fillQueryResultFromObj(b, filter, o) ) {
- nFilled++;
- if( nFilled >= limit )
- goto done;
- uassert( "too much data for sort() with no index", b.len() < 4000000 ); // appserver limit
- }
- }
+ ScanAndOrder(int _startFrom, int _limit, BSONObj _order) :
+ startFrom(_startFrom), order(_order) {
+ limit = _limit > 0 ? _limit + startFrom : 0x7fffffff;
+ approxSize = 0;
+
+ // todo: do order right for compound keys. this is temp.
+ dir = 1;
+ BSONElement e = order.pattern.firstElement();
+ if ( e.number() < 0 ) {
+ dir = -1;
+ }
+ }
+
+ int size() const {
+ return best.size();
+ }
+
+ void add(BSONObj o) {
+ BSONObj k = order.getKeyFromObject(o);
+ if ( (int) best.size() < limit ) {
+ approxSize += k.objsize();
+ uassert( "too much key data for sort() with no index", approxSize < 1 * 1024 * 1024 );
+ _add(k, o);
+ return;
+ }
+ BestMap::iterator i;
+ if ( dir < 0 )
+ i = best.begin();
+ else {
+ assert( best.end() != best.begin() );
+ i = best.end();
+ i--;
+ }
+ _addIfBetter(k, o, i);
+ }
+
+ template<class T>
+ void _fill(BufBuilder& b, set<string> *filter, int& nout, T begin, T end) {
+ int n = 0;
+ int nFilled = 0;
+ for ( T i = begin; i != end; i++ ) {
+ n++;
+ if ( n <= startFrom )
+ continue;
+ BSONObj& o = i->second;
+ if ( fillQueryResultFromObj(b, filter, o) ) {
+ nFilled++;
+ if ( nFilled >= limit )
+ goto done;
+ uassert( "too much data for sort() with no index", b.len() < 4000000 ); // appserver limit
+ }
+ }
done:
- nout = nFilled;
- }
-
- /* scanning complete. stick the query result in b for n objects. */
- void fill(BufBuilder& b, set<string> *filter, int& nout) {
- // for( BestMap::iterator i = best.begin(); i != best.end(); i++ )
- // cout << " fill:" << i->first.toString() << endl;
- // for( BestMap::reverse_iterator i = best.rbegin(); i != best.rend(); i++ )
- // cout << " fillr:" << i->first.toString() << endl;
- if( dir > 0 )
- _fill(b, filter, nout, best.begin(), best.end());
- else
- _fill(b, filter, nout, best.rbegin(), best.rend());
- }
+ nout = nFilled;
+ }
+
+ /* scanning complete. stick the query result in b for n objects. */
+ void fill(BufBuilder& b, set<string> *filter, int& nout) {
+ // for( BestMap::iterator i = best.begin(); i != best.end(); i++ )
+ // cout << " fill:" << i->first.toString() << endl;
+ // for( BestMap::reverse_iterator i = best.rbegin(); i != best.rend(); i++ )
+ // cout << " fillr:" << i->first.toString() << endl;
+ if ( dir > 0 )
+ _fill(b, filter, nout, best.begin(), best.end());
+ else
+ _fill(b, filter, nout, best.rbegin(), best.rend());
+ }
};
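
ScanAndOrder, reformatted above, performs an unindexed sort by keeping at most `limit` candidates (the requested limit plus the skip count) in a multimap keyed by the extracted sort key; once the map is full, a new document only displaces the current worst entry when its key compares better in the sort direction. A compact standalone model of that bounded best-set is sketched below, with integers for keys, strings for documents, and hypothetical names.

    // topn_sketch.cpp -- simplified model of ScanAndOrder's bounded best-set:
    // keep at most `limit` entries sorted by key, replacing the worst when a
    // better candidate arrives.
    #include <cassert>
    #include <iterator>
    #include <map>
    #include <string>

    class TopN {
        std::multimap<int, std::string> best;   // key -> document
        size_t limit;
        int dir;                                // +1 ascending, -1 descending
    public:
        TopN(size_t limit_, int dir_) : limit(limit_), dir(dir_) {}

        void add(int key, const std::string& doc) {
            if (best.size() < limit) {          // not full yet: always keep
                best.insert({key, doc});
                return;
            }
            // the "worst" kept entry sits at the high end for ascending order,
            // at the low end for descending order
            auto worst = (dir > 0) ? std::prev(best.end()) : best.begin();
            int c = worst->first - key;
            if ((c > 0 && dir > 0) || (c < 0 && dir < 0)) {   // new key is better
                best.erase(worst);
                best.insert({key, doc});
            }
        }
        size_t size() const { return best.size(); }
        int worstKey() const {
            return (dir > 0) ? std::prev(best.end())->first : best.begin()->first;
        }
    };

    int main() {
        TopN t(2, +1);                // keep the two smallest keys
        t.add(5, "a"); t.add(3, "b"); t.add(9, "c"); t.add(1, "d");
        assert(t.size() == 2 && t.worstKey() == 3);   // keys {1, 3} survive
    }
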
diff --git a/db/storage.h b/db/storage.h
index 5e4f43f77f3..f53a732b2e6 100644
--- a/db/storage.h
+++ b/db/storage.h
@@ -1,15 +1,15 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -32,78 +32,111 @@ class BSONObj;
class PhysicalDataFile;
class DiskLoc {
- int fileNo; /* this will be volume, file #, etc. */
- int ofs;
+ int fileNo; /* this will be volume, file #, etc. */
+ int ofs;
public:
- enum { NullOfs = -1, MaxFiles=4000 };
- int a() const { return fileNo; }
- DiskLoc(int a, int b) : fileNo(a), ofs(b) {
- assert(ofs!=0);
- }
- DiskLoc() { fileNo = -1; ofs = NullOfs; }
-
- DiskLoc(const DiskLoc& l) { fileNo=l.fileNo; ofs=l.ofs; }
-
- bool questionable() {
- return ofs < -1 ||
- fileNo < -1 ||
- fileNo > 20;
- }
-
- bool isNull() const { return ofs == NullOfs; }
- void Null() { fileNo = -1; ofs = NullOfs; }
- void setInvalid() { fileNo = -2; }
- void assertOk() { assert(!isNull()); }
-
- string toString() const {
- if( isNull() )
- return "null";
- stringstream ss;
- ss << hex << fileNo << ':' << ofs;
- return ss.str();
- }
-
- int& GETOFS() { return ofs; }
- int getOfs() const { return ofs; }
- void set(int a, int b) { fileNo=a; ofs=b; }
- void setOfs(int _fileNo, int _ofs) {
- fileNo = _fileNo;
- ofs = _ofs;
- }
-
- void inc(int amt) {
- assert( !isNull() );
- ofs += amt;
- }
-
- bool sameFile(DiskLoc b) { return fileNo == b.fileNo; }
-
- bool operator==(const DiskLoc& b) const { return fileNo==b.fileNo && ofs == b.ofs; }
- bool operator!=(const DiskLoc& b) const { return !(*this==b); }
- const DiskLoc& operator=(const DiskLoc& b) {
- fileNo=b.fileNo; ofs = b.ofs;
- assert(ofs!=0);
- return *this;
- }
- int compare(const DiskLoc& b) const {
- int x = fileNo - b.fileNo;
- if( x )
- return x;
+ enum { NullOfs = -1, MaxFiles=4000 };
+ int a() const {
+ return fileNo;
+ }
+ DiskLoc(int a, int b) : fileNo(a), ofs(b) {
+ assert(ofs!=0);
+ }
+ DiskLoc() {
+ fileNo = -1;
+ ofs = NullOfs;
+ }
+
+ DiskLoc(const DiskLoc& l) {
+ fileNo=l.fileNo;
+ ofs=l.ofs;
+ }
+
+ bool questionable() {
+ return ofs < -1 ||
+ fileNo < -1 ||
+ fileNo > 20;
+ }
+
+ bool isNull() const {
+ return ofs == NullOfs;
+ }
+ void Null() {
+ fileNo = -1;
+ ofs = NullOfs;
+ }
+ void setInvalid() {
+ fileNo = -2;
+ }
+ void assertOk() {
+ assert(!isNull());
+ }
+
+ string toString() const {
+ if ( isNull() )
+ return "null";
+ stringstream ss;
+ ss << hex << fileNo << ':' << ofs;
+ return ss.str();
+ }
+
+ int& GETOFS() {
+ return ofs;
+ }
+ int getOfs() const {
+ return ofs;
+ }
+ void set(int a, int b) {
+ fileNo=a;
+ ofs=b;
+ }
+ void setOfs(int _fileNo, int _ofs) {
+ fileNo = _fileNo;
+ ofs = _ofs;
+ }
+
+ void inc(int amt) {
+ assert( !isNull() );
+ ofs += amt;
+ }
+
+ bool sameFile(DiskLoc b) {
+ return fileNo == b.fileNo;
+ }
+
+ bool operator==(const DiskLoc& b) const {
+ return fileNo==b.fileNo && ofs == b.ofs;
+ }
+ bool operator!=(const DiskLoc& b) const {
+ return !(*this==b);
+ }
+ const DiskLoc& operator=(const DiskLoc& b) {
+ fileNo=b.fileNo;
+ ofs = b.ofs;
+ assert(ofs!=0);
+ return *this;
+ }
+ int compare(const DiskLoc& b) const {
+ int x = fileNo - b.fileNo;
+ if ( x )
+ return x;
return ofs - b.ofs;
- }
- bool operator<(const DiskLoc& b) const { return compare(b) < 0; }
-
- /* get the "thing" associated with this disk location.
- it is assumed the object is what it is -- you must asure that:
- think of this as an unchecked type cast.
+ }
+ bool operator<(const DiskLoc& b) const {
+ return compare(b) < 0;
+ }
+
+ /* get the "thing" associated with this disk location.
+       it is assumed the object is what it is -- you must ensure that:

+ think of this as an unchecked type cast.
*/
- BSONObj obj() const;
- Record* rec() const;
- DeletedRecord* drec() const;
- Extent* ext() const;
- BtreeBucket* btree() const;
+ BSONObj obj() const;
+ Record* rec() const;
+ DeletedRecord* drec() const;
+ Extent* ext() const;
+ BtreeBucket* btree() const;
- PhysicalDataFile& pdf() const;
+ PhysicalDataFile& pdf() const;
};
#pragma pack(pop)
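
DiskLoc, reformatted above, is the storage layer's address type: a data-file number plus a byte offset, with ofs == -1 (NullOfs) serving as the null sentinel, and ordering that compares the file number before the offset. A minimal standalone sketch of that pair follows (illustrative names only, constructor asserts and mutators omitted).

    // diskloc_sketch.cpp -- simplified model of DiskLoc: (file number, byte offset)
    // with ofs == -1 acting as the null sentinel and ordering by file, then offset.
    #include <cassert>
    #include <sstream>
    #include <string>

    struct DiskLocSketch {
        enum { NullOfs = -1 };
        int fileNo;      // which data file (volume / file #)
        int ofs;         // byte offset within that file

        DiskLocSketch() : fileNo(-1), ofs(NullOfs) {}
        DiskLocSketch(int f, int o) : fileNo(f), ofs(o) {}

        bool isNull() const { return ofs == NullOfs; }

        int compare(const DiskLocSketch& b) const {
            int x = fileNo - b.fileNo;      // order by file first...
            return x ? x : ofs - b.ofs;     // ...then by offset within the file
        }
        bool operator<(const DiskLocSketch& b) const { return compare(b) < 0; }

        std::string toString() const {
            if (isNull()) return "null";
            std::ostringstream ss;
            ss << std::hex << fileNo << ':' << ofs;
            return ss.str();
        }
    };

    int main() {
        DiskLocSketch a(0, 0x1000), b(1, 0x10);
        assert(a < b);                        // lower file number sorts first
        assert(DiskLocSketch().isNull());     // default-constructed is null
        assert(DiskLocSketch(0, 0x1000).toString() == "0:1000");
    }
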
diff --git a/db/tests.cpp b/db/tests.cpp
index 92a9125263e..9dbbb632d3d 100644
--- a/db/tests.cpp
+++ b/db/tests.cpp
@@ -1,15 +1,15 @@
/**
* Copyright (C) 2008 10gen Inc.
-*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
-*
+*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -22,39 +22,42 @@
#include "stdafx.h"
#include "../util/mmap.h"
-int test2_old() {
- cout << "test2" << endl;
- printStackTrace();
- if( 1 )
- return 1;
-
- MemoryMappedFile f;
-
- char *p = (char *) f.map("/tmp/test.dat", 64*1024*1024);
- char *start = p;
- char *end = p + 64*1024*1024-2;
- end[1] = 'z';
- int i;
- while( p < end ) {
- *p++ = ' ';
- if( ++i%64 == 0 ) { *p++ = '\n'; *p++ = 'x'; }
- }
- *p = 'a';
-
- f.flush(true);
- cout << "done" << endl;
-
- char *x = start + 32 * 1024 * 1024;
- char *y = start + 48 * 1024 * 1024;
- char *z = start + 62 * 1024 * 1024;
-
- strcpy(z, "zfoo");
- cout << "y" << endl;
- strcpy(y, "yfoo");
- strcpy(x, "xfoo");
- strcpy(start, "xfoo");
-
- exit(3);
-
- return 1;
+int test2_old() {
+ cout << "test2" << endl;
+ printStackTrace();
+ if ( 1 )
+ return 1;
+
+ MemoryMappedFile f;
+
+ char *p = (char *) f.map("/tmp/test.dat", 64*1024*1024);
+ char *start = p;
+ char *end = p + 64*1024*1024-2;
+ end[1] = 'z';
+ int i;
+ while ( p < end ) {
+ *p++ = ' ';
+ if ( ++i%64 == 0 ) {
+ *p++ = '\n';
+ *p++ = 'x';
+ }
+ }
+ *p = 'a';
+
+ f.flush(true);
+ cout << "done" << endl;
+
+ char *x = start + 32 * 1024 * 1024;
+ char *y = start + 48 * 1024 * 1024;
+ char *z = start + 62 * 1024 * 1024;
+
+ strcpy(z, "zfoo");
+ cout << "y" << endl;
+ strcpy(y, "yfoo");
+ strcpy(x, "xfoo");
+ strcpy(start, "xfoo");
+
+ exit(3);
+
+ return 1;
}
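
test2_old above exercises the util/mmap.h MemoryMappedFile wrapper: map a large file, write straight through the returned pointer, then flush. For readers who want to try the pattern outside the server, here is a rough POSIX equivalent using mmap/msync directly; the file name and the 1 MB size are arbitrary choices for the sketch, and this is not the wrapper's actual implementation.

    // mmap_sketch.cpp -- POSIX sketch of the map / write-through-pointer / flush
    // pattern that MemoryMappedFile wraps (not the server's own code).
    #include <cstring>
    #include <iostream>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
        const size_t len = 1 << 20;                 // 1 MB, kept small for the sketch
        int fd = open("/tmp/test.dat", O_RDWR | O_CREAT, 0644);
        if (fd < 0 || ftruncate(fd, static_cast<off_t>(len)) != 0) {
            std::cerr << "open/truncate failed\n";
            return 1;
        }

        void* m = mmap(0, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (m == MAP_FAILED) {
            std::cerr << "mmap failed\n";
            return 1;
        }
        char* p = static_cast<char*>(m);

        std::strcpy(p, "xfoo");                     // write straight through the mapping
        std::strcpy(p + len / 2, "yfoo");

        msync(p, len, MS_SYNC);                     // equivalent of f.flush(true)
        munmap(p, len);
        close(fd);
        std::cout << "done" << std::endl;
    }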