summaryrefslogtreecommitdiff
path: root/src/mongo/db/pipeline/document.cpp
diff options
context:
space:
mode:
authorMark Benvenuto <mark.benvenuto@mongodb.com>2015-06-20 00:22:50 -0400
committerMark Benvenuto <mark.benvenuto@mongodb.com>2015-06-20 10:56:02 -0400
commit9c2ed42daa8fbbef4a919c21ec564e2db55e8d60 (patch)
tree3814f79c10d7b490948d8cb7b112ac1dd41ceff1 /src/mongo/db/pipeline/document.cpp
parent01965cf52bce6976637ecb8f4a622aeb05ab256a (diff)
downloadmongo-9c2ed42daa8fbbef4a919c21ec564e2db55e8d60.tar.gz
SERVER-18579: Clang-Format - reformat code, no comment reflow
Diffstat (limited to 'src/mongo/db/pipeline/document.cpp')
-rw-r--r--src/mongo/db/pipeline/document.cpp655
1 file changed, 321 insertions(+), 334 deletions(-)
diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp
index 741834d39cf..49efa640277 100644
--- a/src/mongo/db/pipeline/document.cpp
+++ b/src/mongo/db/pipeline/document.cpp
@@ -37,425 +37,412 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
- using namespace mongoutils;
- using boost::intrusive_ptr;
- using std::string;
- using std::vector;
-
- Position DocumentStorage::findField(StringData requested) const {
- int reqSize = requested.size(); // get size calculation out of the way if needed
-
- if (_numFields >= HASH_TAB_MIN) { // hash lookup
- const unsigned bucket = bucketForKey(requested);
-
- Position pos = _hashTab[bucket];
- while (pos.found()) {
- const ValueElement& elem = getField(pos);
- if (elem.nameLen == reqSize
- && memcmp(requested.rawData(), elem._name, reqSize) == 0) {
- return pos;
- }
-
- // possible collision
- pos = elem.nextCollision;
+using namespace mongoutils;
+using boost::intrusive_ptr;
+using std::string;
+using std::vector;
+
+Position DocumentStorage::findField(StringData requested) const {
+ int reqSize = requested.size(); // get size calculation out of the way if needed
+
+ if (_numFields >= HASH_TAB_MIN) { // hash lookup
+ const unsigned bucket = bucketForKey(requested);
+
+ Position pos = _hashTab[bucket];
+ while (pos.found()) {
+ const ValueElement& elem = getField(pos);
+ if (elem.nameLen == reqSize && memcmp(requested.rawData(), elem._name, reqSize) == 0) {
+ return pos;
}
+
+ // possible collision
+ pos = elem.nextCollision;
}
- else { // linear scan
- for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance()) {
- if (it->nameLen == reqSize
- && memcmp(requested.rawData(), it->_name, reqSize) == 0) {
- return it.position();
- }
+ } else { // linear scan
+ for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance()) {
+ if (it->nameLen == reqSize && memcmp(requested.rawData(), it->_name, reqSize) == 0) {
+ return it.position();
}
}
-
- // if we got here, there's no such field
- return Position();
}
- Value& DocumentStorage::appendField(StringData name) {
- Position pos = getNextPosition();
- const int nameSize = name.size();
-
- // these are the same for everyone
- const Position nextCollision;
- const Value value;
-
- // Make room for new field (and padding at end for alignment)
- const unsigned newUsed = ValueElement::align(_usedBytes + sizeof(ValueElement) + nameSize);
- if (_buffer + newUsed > _bufferEnd)
- alloc(newUsed);
- _usedBytes = newUsed;
-
- // Append structure of a ValueElement
- char* dest = _buffer + pos.index; // must be after alloc since it changes _buffer
-#define append(x) memcpy(dest, &(x), sizeof(x)); dest += sizeof(x)
- append(value);
- append(nextCollision);
- append(nameSize);
- name.copyTo( dest, true );
- // Padding for alignment handled above
-#undef append
+ // if we got here, there's no such field
+ return Position();
+}
- // Make sure next field starts where we expect it
- fassert(16486, getField(pos).next()->ptr() == _buffer + _usedBytes);
+Value& DocumentStorage::appendField(StringData name) {
+ Position pos = getNextPosition();
+ const int nameSize = name.size();
+
+ // these are the same for everyone
+ const Position nextCollision;
+ const Value value;
+
+ // Make room for new field (and padding at end for alignment)
+ const unsigned newUsed = ValueElement::align(_usedBytes + sizeof(ValueElement) + nameSize);
+ if (_buffer + newUsed > _bufferEnd)
+ alloc(newUsed);
+ _usedBytes = newUsed;
+
+ // Append structure of a ValueElement
+ char* dest = _buffer + pos.index; // must be after alloc since it changes _buffer
+#define append(x) \
+ memcpy(dest, &(x), sizeof(x)); \
+ dest += sizeof(x)
+ append(value);
+ append(nextCollision);
+ append(nameSize);
+ name.copyTo(dest, true);
+// Padding for alignment handled above
+#undef append
- _numFields++;
+ // Make sure next field starts where we expect it
+ fassert(16486, getField(pos).next()->ptr() == _buffer + _usedBytes);
- if (_numFields > HASH_TAB_MIN) {
- addFieldToHashTable(pos);
- }
- else if (_numFields == HASH_TAB_MIN) {
- // adds all fields to hash table (including the one we just added)
- rehash();
- }
+ _numFields++;
- return getField(pos).val;
+ if (_numFields > HASH_TAB_MIN) {
+ addFieldToHashTable(pos);
+ } else if (_numFields == HASH_TAB_MIN) {
+ // adds all fields to hash table (including the one we just added)
+ rehash();
}
- // Call after adding field to _fields and increasing _numFields
- void DocumentStorage::addFieldToHashTable(Position pos) {
- ValueElement& elem = getField(pos);
- elem.nextCollision = Position();
+ return getField(pos).val;
+}
- const unsigned bucket = bucketForKey(elem.nameSD());
+// Call after adding field to _fields and increasing _numFields
+void DocumentStorage::addFieldToHashTable(Position pos) {
+ ValueElement& elem = getField(pos);
+ elem.nextCollision = Position();
- Position* posPtr = &_hashTab[bucket];
- while (posPtr->found()) {
- // collision: walk links and add new to end
- posPtr = &getField(*posPtr).nextCollision;
- }
- *posPtr = Position(pos.index);
+ const unsigned bucket = bucketForKey(elem.nameSD());
+
+ Position* posPtr = &_hashTab[bucket];
+ while (posPtr->found()) {
+ // collision: walk links and add new to end
+ posPtr = &getField(*posPtr).nextCollision;
}
+ *posPtr = Position(pos.index);
+}
- void DocumentStorage::alloc(unsigned newSize) {
- const bool firstAlloc = !_buffer;
- const bool doingRehash = needRehash();
- const size_t oldCapacity = _bufferEnd - _buffer;
-
- // make new bucket count big enough
- while (needRehash() || hashTabBuckets() < HASH_TAB_INIT_SIZE)
- _hashTabMask = hashTabBuckets()*2 - 1;
-
- // only allocate power-of-two sized space > 128 bytes
- size_t capacity = 128;
- while (capacity < newSize + hashTabBytes())
- capacity *= 2;
-
- uassert(16490, "Tried to make oversized document",
- capacity <= size_t(BufferMaxSize));
-
- std::unique_ptr<char[]> oldBuf(_buffer);
- _buffer = new char[capacity];
- _bufferEnd = _buffer + capacity - hashTabBytes();
-
- if (!firstAlloc) {
- // This just copies the elements
- memcpy(_buffer, oldBuf.get(), _usedBytes);
-
- if (_numFields >= HASH_TAB_MIN) {
- // if we were hashing, deal with the hash table
- if (doingRehash) {
- rehash();
- }
- else {
- // no rehash needed so just slide table down to new position
- memcpy(_hashTab, oldBuf.get() + oldCapacity, hashTabBytes());
- }
+void DocumentStorage::alloc(unsigned newSize) {
+ const bool firstAlloc = !_buffer;
+ const bool doingRehash = needRehash();
+ const size_t oldCapacity = _bufferEnd - _buffer;
+
+ // make new bucket count big enough
+ while (needRehash() || hashTabBuckets() < HASH_TAB_INIT_SIZE)
+ _hashTabMask = hashTabBuckets() * 2 - 1;
+
+ // only allocate power-of-two sized space > 128 bytes
+ size_t capacity = 128;
+ while (capacity < newSize + hashTabBytes())
+ capacity *= 2;
+
+ uassert(16490, "Tried to make oversized document", capacity <= size_t(BufferMaxSize));
+
+ std::unique_ptr<char[]> oldBuf(_buffer);
+ _buffer = new char[capacity];
+ _bufferEnd = _buffer + capacity - hashTabBytes();
+
+ if (!firstAlloc) {
+ // This just copies the elements
+ memcpy(_buffer, oldBuf.get(), _usedBytes);
+
+ if (_numFields >= HASH_TAB_MIN) {
+ // if we were hashing, deal with the hash table
+ if (doingRehash) {
+ rehash();
+ } else {
+ // no rehash needed so just slide table down to new position
+ memcpy(_hashTab, oldBuf.get() + oldCapacity, hashTabBytes());
}
}
}
+}
- void DocumentStorage::reserveFields(size_t expectedFields) {
- fassert(16487, !_buffer);
+void DocumentStorage::reserveFields(size_t expectedFields) {
+ fassert(16487, !_buffer);
- unsigned buckets = HASH_TAB_INIT_SIZE;
- while (buckets < expectedFields)
- buckets *= 2;
- _hashTabMask = buckets - 1;
+ unsigned buckets = HASH_TAB_INIT_SIZE;
+ while (buckets < expectedFields)
+ buckets *= 2;
+ _hashTabMask = buckets - 1;
- // Using expectedFields+1 to allow space for long field names
- const size_t newSize = (expectedFields+1) * ValueElement::align(sizeof(ValueElement));
+ // Using expectedFields+1 to allow space for long field names
+ const size_t newSize = (expectedFields + 1) * ValueElement::align(sizeof(ValueElement));
- uassert(16491, "Tried to make oversized document",
- newSize <= size_t(BufferMaxSize));
+ uassert(16491, "Tried to make oversized document", newSize <= size_t(BufferMaxSize));
+
+ _buffer = new char[newSize + hashTabBytes()];
+ _bufferEnd = _buffer + newSize;
+}
- _buffer = new char[newSize + hashTabBytes()];
- _bufferEnd = _buffer + newSize;
+intrusive_ptr<DocumentStorage> DocumentStorage::clone() const {
+ intrusive_ptr<DocumentStorage> out(new DocumentStorage());
+
+ // Make a copy of the buffer.
+ // It is very important that the positions of each field are the same after cloning.
+ const size_t bufferBytes = (_bufferEnd + hashTabBytes()) - _buffer;
+ out->_buffer = new char[bufferBytes];
+ out->_bufferEnd = out->_buffer + (_bufferEnd - _buffer);
+ memcpy(out->_buffer, _buffer, bufferBytes);
+
+ // Copy remaining fields
+ out->_usedBytes = _usedBytes;
+ out->_numFields = _numFields;
+ out->_hashTabMask = _hashTabMask;
+ out->_hasTextScore = _hasTextScore;
+ out->_textScore = _textScore;
+
+ // Tell values that they have been memcpyed (updates ref counts)
+ for (DocumentStorageIterator it = out->iteratorAll(); !it.atEnd(); it.advance()) {
+ it->val.memcpyed();
}
- intrusive_ptr<DocumentStorage> DocumentStorage::clone() const {
- intrusive_ptr<DocumentStorage> out (new DocumentStorage());
-
- // Make a copy of the buffer.
- // It is very important that the positions of each field are the same after cloning.
- const size_t bufferBytes = (_bufferEnd + hashTabBytes()) - _buffer;
- out->_buffer = new char[bufferBytes];
- out->_bufferEnd = out->_buffer + (_bufferEnd - _buffer);
- memcpy(out->_buffer, _buffer, bufferBytes);
-
- // Copy remaining fields
- out->_usedBytes = _usedBytes;
- out->_numFields = _numFields;
- out->_hashTabMask = _hashTabMask;
- out->_hasTextScore = _hasTextScore;
- out->_textScore = _textScore;
-
- // Tell values that they have been memcpyed (updates ref counts)
- for (DocumentStorageIterator it = out->iteratorAll(); !it.atEnd(); it.advance()) {
- it->val.memcpyed();
- }
+ return out;
+}
- return out;
+DocumentStorage::~DocumentStorage() {
+ std::unique_ptr<char[]> deleteBufferAtScopeEnd(_buffer);
+
+ for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance()) {
+ it->val.~Value(); // explicit destructor call
}
+}
- DocumentStorage::~DocumentStorage() {
- std::unique_ptr<char[]> deleteBufferAtScopeEnd (_buffer);
+Document::Document(const BSONObj& bson) {
+ MutableDocument md(bson.nFields());
- for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance()) {
- it->val.~Value(); // explicit destructor call
- }
+ BSONObjIterator it(bson);
+ while (it.more()) {
+ BSONElement bsonElement(it.next());
+ md.addField(bsonElement.fieldNameStringData(), Value(bsonElement));
}
- Document::Document(const BSONObj& bson) {
- MutableDocument md(bson.nFields());
-
- BSONObjIterator it(bson);
- while(it.more()) {
- BSONElement bsonElement(it.next());
- md.addField(bsonElement.fieldNameStringData(), Value(bsonElement));
- }
+ *this = md.freeze();
+}
- *this = md.freeze();
- }
+BSONObjBuilder& operator<<(BSONObjBuilderValueStream& builder, const Document& doc) {
+ BSONObjBuilder subobj(builder.subobjStart());
+ doc.toBson(&subobj);
+ subobj.doneFast();
+ return builder.builder();
+}
- BSONObjBuilder& operator << (BSONObjBuilderValueStream& builder, const Document& doc) {
- BSONObjBuilder subobj(builder.subobjStart());
- doc.toBson(&subobj);
- subobj.doneFast();
- return builder.builder();
+void Document::toBson(BSONObjBuilder* pBuilder) const {
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ *pBuilder << it->nameSD() << it->val;
}
+}
- void Document::toBson(BSONObjBuilder* pBuilder) const {
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- *pBuilder << it->nameSD() << it->val;
- }
- }
+BSONObj Document::toBson() const {
+ BSONObjBuilder bb;
+ toBson(&bb);
+ return bb.obj();
+}
- BSONObj Document::toBson() const {
- BSONObjBuilder bb;
- toBson(&bb);
- return bb.obj();
- }
+const StringData Document::metaFieldTextScore("$textScore", StringData::LiteralTag());
- const StringData Document::metaFieldTextScore("$textScore", StringData::LiteralTag());
+BSONObj Document::toBsonWithMetaData() const {
+ BSONObjBuilder bb;
+ toBson(&bb);
+ if (hasTextScore())
+ bb.append(metaFieldTextScore, getTextScore());
+ return bb.obj();
+}
- BSONObj Document::toBsonWithMetaData() const {
- BSONObjBuilder bb;
- toBson(&bb);
- if (hasTextScore())
- bb.append(metaFieldTextScore, getTextScore());
- return bb.obj();
- }
+Document Document::fromBsonWithMetaData(const BSONObj& bson) {
+ MutableDocument md;
- Document Document::fromBsonWithMetaData(const BSONObj& bson) {
- MutableDocument md;
-
- BSONObjIterator it(bson);
- while(it.more()) {
- BSONElement elem(it.next());
- if (elem.fieldName()[0] == '$') {
- if (elem.fieldNameStringData() == metaFieldTextScore) {
- md.setTextScore(elem.Double());
- continue;
- }
+ BSONObjIterator it(bson);
+ while (it.more()) {
+ BSONElement elem(it.next());
+ if (elem.fieldName()[0] == '$') {
+ if (elem.fieldNameStringData() == metaFieldTextScore) {
+ md.setTextScore(elem.Double());
+ continue;
}
-
- // Note: this will not parse out metadata in embedded documents.
- md.addField(elem.fieldNameStringData(), Value(elem));
}
- return md.freeze();
+ // Note: this will not parse out metadata in embedded documents.
+ md.addField(elem.fieldNameStringData(), Value(elem));
}
- MutableDocument::MutableDocument(size_t expectedFields)
- : _storageHolder(NULL)
- , _storage(_storageHolder)
- {
- if (expectedFields) {
- storage().reserveFields(expectedFields);
- }
- }
+ return md.freeze();
+}
- MutableValue MutableDocument::getNestedFieldHelper(const FieldPath& dottedField,
- size_t level) {
- if (level == dottedField.getPathLength()-1) {
- return getField(dottedField.getFieldName(level));
- }
- else {
- MutableDocument nested (getField(dottedField.getFieldName(level)));
- return nested.getNestedFieldHelper(dottedField, level+1);
- }
+MutableDocument::MutableDocument(size_t expectedFields)
+ : _storageHolder(NULL), _storage(_storageHolder) {
+ if (expectedFields) {
+ storage().reserveFields(expectedFields);
}
+}
- MutableValue MutableDocument::getNestedField(const FieldPath& dottedField) {
- fassert(16601, dottedField.getPathLength());
- return getNestedFieldHelper(dottedField, 0);
+MutableValue MutableDocument::getNestedFieldHelper(const FieldPath& dottedField, size_t level) {
+ if (level == dottedField.getPathLength() - 1) {
+ return getField(dottedField.getFieldName(level));
+ } else {
+ MutableDocument nested(getField(dottedField.getFieldName(level)));
+ return nested.getNestedFieldHelper(dottedField, level + 1);
}
+}
- MutableValue MutableDocument::getNestedFieldHelper(const vector<Position>& positions,
- size_t level) {
- if (level == positions.size()-1) {
- return getField(positions[level]);
- }
- else {
- MutableDocument nested (getField(positions[level]));
- return nested.getNestedFieldHelper(positions, level+1);
- }
- }
+MutableValue MutableDocument::getNestedField(const FieldPath& dottedField) {
+ fassert(16601, dottedField.getPathLength());
+ return getNestedFieldHelper(dottedField, 0);
+}
- MutableValue MutableDocument::getNestedField(const vector<Position>& positions) {
- fassert(16488, !positions.empty());
- return getNestedFieldHelper(positions, 0);
+MutableValue MutableDocument::getNestedFieldHelper(const vector<Position>& positions,
+ size_t level) {
+ if (level == positions.size() - 1) {
+ return getField(positions[level]);
+ } else {
+ MutableDocument nested(getField(positions[level]));
+ return nested.getNestedFieldHelper(positions, level + 1);
}
+}
- static Value getNestedFieldHelper(const Document& doc,
- const FieldPath& fieldNames,
- vector<Position>* positions,
- size_t level) {
+MutableValue MutableDocument::getNestedField(const vector<Position>& positions) {
+ fassert(16488, !positions.empty());
+ return getNestedFieldHelper(positions, 0);
+}
- const string& fieldName = fieldNames.getFieldName(level);
- const Position pos = doc.positionOf(fieldName);
+static Value getNestedFieldHelper(const Document& doc,
+ const FieldPath& fieldNames,
+ vector<Position>* positions,
+ size_t level) {
+ const string& fieldName = fieldNames.getFieldName(level);
+ const Position pos = doc.positionOf(fieldName);
- if (!pos.found())
- return Value();
+ if (!pos.found())
+ return Value();
- if (positions)
- positions->push_back(pos);
+ if (positions)
+ positions->push_back(pos);
- if (level == fieldNames.getPathLength()-1)
- return doc.getField(pos);
+ if (level == fieldNames.getPathLength() - 1)
+ return doc.getField(pos);
- Value val = doc.getField(pos);
- if (val.getType() != Object)
- return Value();
+ Value val = doc.getField(pos);
+ if (val.getType() != Object)
+ return Value();
- return getNestedFieldHelper(val.getDocument(), fieldNames, positions, level+1);
- }
-
- const Value Document::getNestedField(const FieldPath& fieldNames,
- vector<Position>* positions) const {
- fassert(16489, fieldNames.getPathLength());
- return getNestedFieldHelper(*this, fieldNames, positions, 0);
- }
+ return getNestedFieldHelper(val.getDocument(), fieldNames, positions, level + 1);
+}
- size_t Document::getApproximateSize() const {
- if (!_storage)
- return 0; // we've allocated no memory
+const Value Document::getNestedField(const FieldPath& fieldNames,
+ vector<Position>* positions) const {
+ fassert(16489, fieldNames.getPathLength());
+ return getNestedFieldHelper(*this, fieldNames, positions, 0);
+}
- size_t size = sizeof(DocumentStorage);
- size += storage().allocatedBytes();
+size_t Document::getApproximateSize() const {
+ if (!_storage)
+ return 0; // we've allocated no memory
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- size += it->val.getApproximateSize();
- size -= sizeof(Value); // already accounted for above
- }
+ size_t size = sizeof(DocumentStorage);
+ size += storage().allocatedBytes();
- return size;
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ size += it->val.getApproximateSize();
+ size -= sizeof(Value); // already accounted for above
}
- void Document::hash_combine(size_t &seed) const {
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- StringData name = it->nameSD();
- boost::hash_range(seed, name.rawData(), name.rawData() + name.size());
- it->val.hash_combine(seed);
- }
+ return size;
+}
+
+void Document::hash_combine(size_t& seed) const {
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ StringData name = it->nameSD();
+ boost::hash_range(seed, name.rawData(), name.rawData() + name.size());
+ it->val.hash_combine(seed);
}
+}
- int Document::compare(const Document& rL, const Document& rR) {
- DocumentStorageIterator lIt = rL.storage().iterator();
- DocumentStorageIterator rIt = rR.storage().iterator();
+int Document::compare(const Document& rL, const Document& rR) {
+ DocumentStorageIterator lIt = rL.storage().iterator();
+ DocumentStorageIterator rIt = rR.storage().iterator();
- while (true) {
- if (lIt.atEnd()) {
- if (rIt.atEnd())
- return 0; // documents are the same length
+ while (true) {
+ if (lIt.atEnd()) {
+ if (rIt.atEnd())
+ return 0; // documents are the same length
- return -1; // left document is shorter
- }
+ return -1; // left document is shorter
+ }
- if (rIt.atEnd())
- return 1; // right document is shorter
+ if (rIt.atEnd())
+ return 1; // right document is shorter
- const ValueElement& rField = rIt.get();
- const ValueElement& lField = lIt.get();
+ const ValueElement& rField = rIt.get();
+ const ValueElement& lField = lIt.get();
- // For compatibility with BSONObj::woCompare() consider the canonical type of values
-            // before considering their names.
- const int rCType = canonicalizeBSONType(rField.val.getType());
- const int lCType = canonicalizeBSONType(lField.val.getType());
- if (lCType != rCType)
- return lCType < rCType ? -1 : 1;
+ // For compatibility with BSONObj::woCompare() consider the canonical type of values
+        // before considering their names.
+ const int rCType = canonicalizeBSONType(rField.val.getType());
+ const int lCType = canonicalizeBSONType(lField.val.getType());
+ if (lCType != rCType)
+ return lCType < rCType ? -1 : 1;
- const int nameCmp = lField.nameSD().compare(rField.nameSD());
- if (nameCmp)
- return nameCmp; // field names are unequal
+ const int nameCmp = lField.nameSD().compare(rField.nameSD());
+ if (nameCmp)
+ return nameCmp; // field names are unequal
- const int valueCmp = Value::compare(lField.val, rField.val);
- if (valueCmp)
- return valueCmp; // fields are unequal
+ const int valueCmp = Value::compare(lField.val, rField.val);
+ if (valueCmp)
+ return valueCmp; // fields are unequal
- rIt.advance();
- lIt.advance();
- }
+ rIt.advance();
+ lIt.advance();
}
+}
- string Document::toString() const {
- if (empty())
- return "{}";
-
- StringBuilder out;
- const char* prefix = "{";
+string Document::toString() const {
+ if (empty())
+ return "{}";
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- out << prefix << it->nameSD() << ": " << it->val.toString();
- prefix = ", ";
- }
- out << '}';
+ StringBuilder out;
+ const char* prefix = "{";
- return out.str();
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ out << prefix << it->nameSD() << ": " << it->val.toString();
+ prefix = ", ";
}
+ out << '}';
- void Document::serializeForSorter(BufBuilder& buf) const {
- const int numElems = size();
- buf.appendNum(numElems);
+ return out.str();
+}
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- buf.appendStr(it->nameSD(), /*NUL byte*/ true);
- it->val.serializeForSorter(buf);
- }
+void Document::serializeForSorter(BufBuilder& buf) const {
+ const int numElems = size();
+ buf.appendNum(numElems);
- if (hasTextScore()) {
- buf.appendNum(char(1));
- buf.appendNum(getTextScore());
- }
- else {
- buf.appendNum(char(0));
- }
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ buf.appendStr(it->nameSD(), /*NUL byte*/ true);
+ it->val.serializeForSorter(buf);
}
- Document Document::deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) {
- const int numElems = buf.read<int>();
- MutableDocument doc(numElems);
- for (int i = 0; i < numElems; i++) {
- StringData name = buf.readCStr();
- doc.addField(name, Value::deserializeForSorter(buf,
- Value::SorterDeserializeSettings()));
- }
-
- if (buf.read<char>()) // hasTextScore
- doc.setTextScore(buf.read<double>());
+ if (hasTextScore()) {
+ buf.appendNum(char(1));
+ buf.appendNum(getTextScore());
+ } else {
+ buf.appendNum(char(0));
+ }
+}
- return doc.freeze();
+Document Document::deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) {
+ const int numElems = buf.read<int>();
+ MutableDocument doc(numElems);
+ for (int i = 0; i < numElems; i++) {
+ StringData name = buf.readCStr();
+ doc.addField(name, Value::deserializeForSorter(buf, Value::SorterDeserializeSettings()));
}
+
+ if (buf.read<char>()) // hasTextScore
+ doc.setTextScore(buf.read<double>());
+
+ return doc.freeze();
+}
}