summary refs log tree commit diff
diff options
context:
space:
mode:
author Benety Goh <benety@mongodb.com> 2014-03-18 01:05:56 -0400
committer Benety Goh <benety@mongodb.com> 2014-03-18 13:42:17 -0400
commit 0de6511419aee109f2fb997e14b596f6295c90a8 (patch)
tree 024a1f03da09f7f707316f52bd519a39d0cf551c
parent 63861f53c4e12fdd3efbcde57e6a1c8d12e675c0 (diff)
download mongo-0de6511419aee109f2fb997e14b596f6295c90a8.tar.gz
SERVER-13241 increased buffer size for stats in curop from 512 to 2048
-rw-r--r-- jstests/core/profile4.js   6
-rw-r--r-- jstests/profile4.js        6
-rw-r--r-- src/mongo/db/client.cpp    2
-rw-r--r-- src/mongo/db/curop.h      15
4 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/jstests/core/profile4.js b/jstests/core/profile4.js
index ce211045cd4..63940ed1530 100644
--- a/jstests/core/profile4.js
+++ b/jstests/core/profile4.js
@@ -96,6 +96,12 @@ try {
t.find().sort( {a:1} ).itcount();
checkLastOp( [ [ "scanAndOrder", true ] ] );
+ t.ensureIndex( {a:1} );
+ t.find( {a:1} ).itcount();
+ o = lastOp();
+ assert.eq( "FETCH", o.execStats.type, tojson( o.execStats ) );
+ assert.eq( "IXSCAN", o.execStats.children[0].type, tojson( o.execStats ) );
+
db.setProfilingLevel(0);
db.system.profile.drop();
}
diff --git a/jstests/profile4.js b/jstests/profile4.js
index e9b315e2ae2..83bc0a27c7e 100644
--- a/jstests/profile4.js
+++ b/jstests/profile4.js
@@ -90,6 +90,12 @@ try {
t.find().sort( {a:1} ).itcount();
checkLastOp( [ [ "scanAndOrder", true ] ] );
+ t.ensureIndex( {a:1} );
+ t.find( {a:1} ).itcount();
+ o = lastOp();
+ assert.eq( "FETCH", o.execStats.type, tojson( o.execStats ) );
+ assert.eq( "IXSCAN", o.execStats.children[0].type, tojson( o.execStats ) );
+
db.setProfilingLevel(0);
db.system.profile.drop();
}
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 4a1971380e0..e492503a397 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -238,7 +238,7 @@ namespace mongo {
return false;
}
- BSONObj CachedBSONObj::_tooBig = fromjson("{\"$msg\":\"query not recording (too large)\"}");
+ BSONObj CachedBSONObjBase::_tooBig = fromjson("{\"$msg\":\"query not recording (too large)\"}");
Client::Context::Context(const std::string& ns , Database * db) :
_client( currentClient.get() ),
_oldContext( _client->_context ),
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 9a7ef4bb48d..1226d0f3ed1 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -50,10 +50,15 @@ namespace mongo {
* if its too big for the buffer, says "too big"
* useful for keeping a copy around indefinitely without wasting a lot of space or doing malloc
*/
- class CachedBSONObj {
+ class CachedBSONObjBase {
public:
- enum { TOO_BIG_SENTINEL = 1 } ;
static BSONObj _tooBig; // { $msg : "query not recording (too large)" }
+ };
+
+ template <size_t BUFFER_SIZE>
+ class CachedBSONObj : public CachedBSONObjBase {
+ public:
+ enum { TOO_BIG_SENTINEL = 1 } ;
CachedBSONObj() {
_size = (int*)_buf;
@@ -107,7 +112,7 @@ namespace mongo {
mutable SpinLock _lock;
int * _size;
- char _buf[512];
+ char _buf[BUFFER_SIZE];
};
/* lifespan is different than CurOp because of recursives with DBDirectClient */
@@ -171,7 +176,7 @@ namespace mongo {
// New Query Framework debugging/profiling info
// TODO: should this really be an opaque BSONObj? Not sure.
- CachedBSONObj execStats;
+ CachedBSONObj<2048> execStats;
// error handling
ExceptionInfo exceptionInfo;
@@ -343,7 +348,7 @@ namespace mongo {
AtomicUInt _opNum; // todo: simple being "unsigned" may make more sense here
char _ns[Namespace::MaxNsLen+2];
HostAndPort _remote; // CAREFUL here with thread safety
- CachedBSONObj _query; // CachedBSONObj is thread safe
+ CachedBSONObj<512> _query; // CachedBSONObj is thread safe
OpDebug _debug;
ThreadSafeString _message;
ProgressMeter _progressMeter;