path: root/src/mongo/db/pipeline/document_internal.h
/**
 *    Copyright (C) 2018-present MongoDB, Inc.
 *
 *    This program is free software: you can redistribute it and/or modify
 *    it under the terms of the Server Side Public License, version 1,
 *    as published by MongoDB, Inc.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    Server Side Public License for more details.
 *
 *    You should have received a copy of the Server Side Public License
 *    along with this program. If not, see
 *    <http://www.mongodb.com/licensing/server-side-public-license>.
 *
 *    As a special exception, the copyright holders give permission to link the
 *    code of portions of this program with the OpenSSL library under certain
 *    conditions as described in each individual source file and distribute
 *    linked combinations including the program with the OpenSSL library. You
 *    must comply with the Server Side Public License in all respects for
 *    all of the code used other than as permitted herein. If you modify file(s)
 *    with this exception, you may extend this exception to your version of the
 *    file(s), but you are not obligated to do so. If you do not wish to do so,
 *    delete this exception statement from your version. If you delete this
 *    exception statement from all source files in the program, then also delete
 *    it in the license file.
 */

#pragma once

#include <third_party/murmurhash3/MurmurHash3.h>

#include <bitset>
#include <boost/intrusive_ptr.hpp>

#include "mongo/base/static_assert.h"
#include "mongo/db/pipeline/value.h"
#include "mongo/util/intrusive_counter.h"

namespace mongo {
/** Helper class that abstracts a field's position within a document.
 *  Warning: this is NOT guaranteed to be the ordinal position;
 *           e.g. the first field may not be at Position(0).
 */
class Position {
public:
    // This represents "not found" similar to std::string::npos
    Position() : index(static_cast<unsigned>(-1)) {}
    bool found() const {
        return index != Position().index;
    }

    bool operator==(Position rhs) const {
        return this->index == rhs.index;
    }
    bool operator!=(Position rhs) const {
        return !(*this == rhs);
    }

    // For debugging and ASSERT_EQUALS in tests.
    template <typename OStream>
    friend OStream& operator<<(OStream& stream, Position p) {
        return stream << p.index;
    }

private:
    explicit Position(size_t i) : index(i) {}
    unsigned index;
    friend class DocumentStorage;
    friend class DocumentStorageIterator;
    friend class DocumentStorageCacheIterator;
};
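
// Illustrative note: internally a Position holds the byte offset of a ValueElement from the start
// of the DocumentStorage cache buffer (see DocumentStorageIterator::position() below); callers
// should treat it as an opaque token.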

#pragma pack(1)
/** This is how values are stored in the DocumentStorage buffer
 *  Internal class. Consumers shouldn't care about this.
 */
class ValueElement {
    ValueElement(const ValueElement&) = delete;
    ValueElement& operator=(const ValueElement&) = delete;

public:
    enum class Kind : char {
        // The value does not exist in the underlying BSON.
        kInserted,
        // The value has the image in the underlying BSON.
        kCached,
        // The value has been opportunistically inserted into the cache without checking the BSON.
        kMaybeInserted
    };

    Value val;
    Position nextCollision;  // Position of next field with same hashBucket
    const int nameLen;       // doesn't include '\0'
    Kind kind;               // See the possible kinds above for comments
    const char _name[1];     // pointer to start of name (use nameSD instead)

    ValueElement* next() {
        return align(plusBytes(sizeof(ValueElement) + nameLen));
    }

    const ValueElement* next() const {
        return align(plusBytes(sizeof(ValueElement) + nameLen));
    }

    StringData nameSD() const {
        return StringData(_name, nameLen);
    }


    // helpers for doing pointer arithmetic with this class
    char* ptr() {
        return reinterpret_cast<char*>(this);
    }
    const char* ptr() const {
        return reinterpret_cast<const char*>(this);
    }
    const ValueElement* plusBytes(size_t bytes) const {
        return reinterpret_cast<const ValueElement*>(ptr() + bytes);
    }
    ValueElement* plusBytes(size_t bytes) {
        return reinterpret_cast<ValueElement*>(ptr() + bytes);
    }

    // Round number or pointer up to N-byte boundary. No change if already aligned.
    template <typename T>
    static T align(T size) {
        const intmax_t ALIGNMENT = 8;  // must be power of 2 and <= 16 (malloc alignment)
        // Can't use a C++-style cast here because we convert between intmax_t and both integers
        // and pointers.
        return (T)(((intmax_t)(size) + (ALIGNMENT - 1)) & ~(ALIGNMENT - 1));
    }
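
    // Illustrative example (not normative): with ALIGNMENT == 8, align(13) == 16,
    // align(16) == 16, and a pointer holding address 0x1001 rounds up to 0x1008.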

private:
    ValueElement();   // this class should never be constructed
    ~ValueElement();  // or destructed
};
// Real size is sizeof(ValueElement) + nameLen
#pragma pack()
MONGO_STATIC_ASSERT(sizeof(ValueElement) ==
                    (sizeof(Value) + sizeof(Position) + sizeof(int) + sizeof(char) + 1));
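
// Illustrative note: because ValueElement is packed and the field name is stored inline after the
// fixed-size header, a field named "abc" (nameLen == 3) occupies align(sizeof(ValueElement) + 3)
// bytes in the cache buffer; ValueElement::next() performs exactly this arithmetic to reach the
// element that follows.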

class DocumentStorage;

/**
 * This is an internal class for Document. See FieldIterator for the public version.
 *
 * We iterate the fields in two phases.
 * In the first phase we walk the underlying BSON and consult the cache to see whether the current
 * element has been modified. If the element has been deleted we skip it; if it has been updated we
 * return the updated value from the cache.
 * If the element is not in the cache at all, it has never been requested (i.e. nobody called
 * getField with that field name). At this point we could construct the Value in the cache, but we
 * don't, because not all iterator users actually inspect the values (e.g. size() just counts the
 * number of elements and does not care about the values at all).
 * This walk over the underlying BSON makes _it 'jump around'.
 *
 * In the second phase (once we exhaust the BSON) we walk the cache and return the inserted values,
 * as they were not present in the original BSON.
 *
 * We do this 'complicated' dance in order to preserve the original ordering of fields.
 */
class DocumentStorageIterator {
public:
    // DocumentStorage::iterator() and iteratorCacheOnly() are easier to use
    DocumentStorageIterator(DocumentStorage* storage, BSONObjIterator bsonIt);

    bool atEnd() const {
        return !_bsonIt.more() && (_it == _end);
    }

    const ValueElement& get() {
        if (_it) {
            return *_it;
        } else {
            return constructInCache();
        }
    }

    Position position() const {
        return Position(_it->ptr() - _first->ptr());
    }

    void advance();

    const ValueElement* operator->() {
        return &get();
    }
    const ValueElement& operator*() {
        return get();
    }

    const ValueElement* cachedValue() const {
        return _it;
    }

    BSONObjIterator& bsonIter() {
        return _bsonIt;
    }

private:
    /** Construct a new ValueElement in the storage cache. The value comes from the current
     *  BSONElement pointed to by _bsonIt.
     */
    const ValueElement& constructInCache();

    void advanceOne() {
        if (_bsonIt.more()) {
            ++_bsonIt;
            if (!_bsonIt.more()) {
                _it = _first;
            }
        } else {
            _it = _it->next();
        }
    }
    bool shouldSkipDeleted();

    BSONObjIterator _bsonIt;
    const ValueElement* _first;
    const ValueElement* _it;
    const ValueElement* _end;
    DocumentStorage* _storage;
};
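
// Illustrative sketch of using the iterator above (assumes a DocumentStorage 'storage' built from
// BSON; this is not part of the header's interface):
//
//     for (auto it = storage.iterator(); !it.atEnd(); it.advance()) {
//         StringData name = it->nameSD();  // BSON-backed fields first, then cache-only inserts
//         Value value = it->val;           // get() may lazily construct the cache entry
//     }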

class DocumentStorageCacheIterator {
public:
    DocumentStorageCacheIterator(const ValueElement* first, const ValueElement* end)
        : _first(first), _it(first), _end(end) {}

    bool atEnd() const {
        return _it == _end;
    }

    const ValueElement& get() const {
        return *_it;
    }

    Position position() const {
        return Position(_it->ptr() - _first->ptr());
    }

    void advance() {
        advanceOne();
    }

    const ValueElement* operator->() {
        return _it;
    }
    const ValueElement& operator*() {
        return *_it;
    }

private:
    void advanceOne() {
        _it = _it->next();
    }

    const ValueElement* _first;
    const ValueElement* _it;
    const ValueElement* _end;
};

enum MetaType : char {
    TEXT_SCORE,
    RAND_VAL,
    SORT_KEY,
    GEONEAR_DIST,
    GEONEAR_POINT,
    SEARCH_SCORE,
    SEARCH_HIGHLIGHTS,

    // New fields must be added before the NUM_FIELDS sentinel.
    NUM_FIELDS
};

/**
 * A simple container for all metadata fields.
 */
struct MetadataFields {
    std::bitset<MetaType::NUM_FIELDS> _metaFields;
    double _textScore{0.0};
    double _randVal{0.0};
    BSONObj _sortKey;
    double _geoNearDistance{0.0};
    Value _geoNearPoint;
    double _searchScore{0.0};
    Value _searchHighlights;

    MetadataFields() {}
    // When adding a field, make sure to update the copy constructor.
    MetadataFields(const MetadataFields& other);

    size_t getApproximateSize() const;

    bool hasTextScore() const {
        return _metaFields.test(MetaType::TEXT_SCORE);
    }
    double getTextScore() const {
        return _textScore;
    }
    void setTextScore(double score) {
        _metaFields.set(MetaType::TEXT_SCORE);
        _textScore = score;
    }

    bool hasRandMetaField() const {
        return _metaFields.test(MetaType::RAND_VAL);
    }
    double getRandMetaField() const {
        return _randVal;
    }
    void setRandMetaField(double val) {
        _metaFields.set(MetaType::RAND_VAL);
        _randVal = val;
    }

    bool hasSortKeyMetaField() const {
        return _metaFields.test(MetaType::SORT_KEY);
    }
    BSONObj getSortKeyMetaField() const {
        return _sortKey;
    }
    void setSortKeyMetaField(BSONObj sortKey) {
        _metaFields.set(MetaType::SORT_KEY);
        _sortKey = sortKey.getOwned();
    }

    bool hasGeoNearDistance() const {
        return _metaFields.test(MetaType::GEONEAR_DIST);
    }
    double getGeoNearDistance() const {
        return _geoNearDistance;
    }
    void setGeoNearDistance(double dist) {
        _metaFields.set(MetaType::GEONEAR_DIST);
        _geoNearDistance = dist;
    }

    bool hasGeoNearPoint() const {
        return _metaFields.test(MetaType::GEONEAR_POINT);
    }
    Value getGeoNearPoint() const {
        return _geoNearPoint;
    }
    void setGeoNearPoint(Value point) {
        _metaFields.set(MetaType::GEONEAR_POINT);
        _geoNearPoint = std::move(point);
    }

    bool hasSearchScore() const {
        return _metaFields.test(MetaType::SEARCH_SCORE);
    }
    double getSearchScore() const {
        return _searchScore;
    }
    void setSearchScore(double score) {
        _metaFields.set(MetaType::SEARCH_SCORE);
        _searchScore = score;
    }

    bool hasSearchHighlights() const {
        return _metaFields.test(MetaType::SEARCH_HIGHLIGHTS);
    }
    Value getSearchHighlights() const {
        return _searchHighlights;
    }
    void setSearchHighlights(Value highlights) {
        _metaFields.set(MetaType::SEARCH_HIGHLIGHTS);
        _searchHighlights = highlights;
    }
};


/// Storage class used by both Document and MutableDocument
class DocumentStorage : public RefCountable {
public:
    DocumentStorage()
        : _cache(nullptr),
          _cacheEnd(nullptr),
          _usedBytes(0),
          _numFields(0),
          _hashTabMask(0),
          _bsonIt(_bson) {}

    /**
     * Construct a storage from the BSON. The BSON is lazily processed as fields are requested from
     * the document. If we know that the BSON does not contain any metadata fields we can set the
     * 'stripMetadata' flag to false, which speeds up field iteration.
     */
    DocumentStorage(const BSONObj& bson, bool stripMetadata, bool modified) : DocumentStorage() {
        _bson = bson.getOwned();
        _bsonIt = BSONObjIterator(_bson);
        _stripMetadata = stripMetadata;
        _modified = modified;
    }
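
    // Illustrative sketch (assumed usage, not prescribed by this class): building a storage that
    // owns its BSON and skips the metadata scan because the caller knows no meta fields are
    // present:
    //
    //     BSONObj obj = BSON("a" << 1 << "b" << 2);
    //     boost::intrusive_ptr<DocumentStorage> storage(
    //         new DocumentStorage(obj, /*stripMetadata=*/false, /*modified=*/false));
    //     Value a = storage->getField("a");  // lazily pulls 'a' from the BSON into the cache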

    ~DocumentStorage();

    static const DocumentStorage& emptyDoc() {
        return kEmptyDoc;
    }

    size_t size() const {
        // Can't use _numFields because it includes removed fields.
        size_t count = 0;
        for (DocumentStorageIterator it = iterator(); !it.atEnd(); it.advance())
            count++;
        return count;
    }

    /// Returns the position of the next field to be inserted
    Position getNextPosition() const {
        return Position(_usedBytes);
    }

    enum class LookupPolicy {
        // When looking up a field, check the cache only.
        kCacheOnly,
        // Look up in the cache and, when not found, search the underlying BSON.
        kCacheAndBSON
    };

    /// Returns the position of the named field or Position()
    Position findField(StringData name, LookupPolicy policy) const;

    // Document uses these
    const ValueElement& getField(Position pos) const {
        verify(pos.found());
        return *(_firstElement->plusBytes(pos.index));
    }
    Value getField(StringData name) const {
        Position pos = findField(name, LookupPolicy::kCacheAndBSON);
        if (!pos.found())
            return Value();
        return getField(pos).val;
    }

    // MutableDocument uses these
    ValueElement& getField(Position pos) {
        _modified = true;
        verify(pos.found());
        return *(_firstElement->plusBytes(pos.index));
    }
    Value& getField(StringData name, LookupPolicy policy) {
        _modified = true;
        Position pos = findField(name, policy);
        if (!pos.found())
            return appendField(name, ValueElement::Kind::kMaybeInserted);
        return getField(pos).val;
    }

    /// Adds a new field with missing Value at the end of the document
    Value& appendField(StringData name, ValueElement::Kind kind);

    /** Preallocates space for fields. Use this to attempt to prevent buffer growth.
     *  This is only valid to call before anything is added to the document.
     */
    void reserveFields(size_t expectedFields);

    /// This returns values from the cache and underlying BSON.
    DocumentStorageIterator iterator() const {
        return DocumentStorageIterator(const_cast<DocumentStorage*>(this), BSONObjIterator(_bson));
    }

    /// This returns values from the cache only.
    auto iteratorCacheOnly() const {
        return DocumentStorageCacheIterator(_firstElement, end());
    }

    /// Shallow copy of this. Caller owns memory.
    boost::intrusive_ptr<DocumentStorage> clone() const;

    size_t allocatedBytes() const {
        return !_cache ? 0 : (_cacheEnd - _cache + hashTabBytes());
    }

    auto bsonObjSize() const {
        return _bson.objsize();
    }
    /**
     * Compute the space allocated for the metadata fields. Will account for space allocated for
     * unused metadata fields as well.
     */
    size_t getMetadataApproximateSize() const;

    /**
     * Copies all metadata from source if it has any.
     * Note: does not clear metadata from this.
     */
    void copyMetaDataFrom(const DocumentStorage& source) {
        // If the underlying BSON object is shared and the source has no metadata fields, then
        // nothing needs to be copied. If the metadata is in the BSON itself, then it is already
        // identical in 'this' and 'source'.
        if (_bson.objdata() == source._bson.objdata() && !source._metadataFields) {
            return;
        }
        if (source.hasTextScore()) {
            setTextScore(source.getTextScore());
        }
        if (source.hasRandMetaField()) {
            setRandMetaField(source.getRandMetaField());
        }
        if (source.hasSortKeyMetaField()) {
            setSortKeyMetaField(source.getSortKeyMetaField());
        }
        if (source.hasGeoNearDistance()) {
            setGeoNearDistance(source.getGeoNearDistance());
        }
        if (source.hasGeoNearPoint()) {
            setGeoNearPoint(source.getGeoNearPoint());
        }
        if (source.hasSearchScore()) {
            setSearchScore(source.getSearchScore());
        }
        if (source.hasSearchHighlights()) {
            setSearchHighlights(source.getSearchHighlights());
        }
    }

    bool hasTextScore() const {
        loadLazyMetadata();
        return _metadataFields->hasTextScore();
    }
    double getTextScore() const {
        loadLazyMetadata();
        return _metadataFields->getTextScore();
    }
    void setTextScore(double score) {
        loadLazyMetadata();
        _metadataFields->setTextScore(score);
    }

    bool hasRandMetaField() const {
        loadLazyMetadata();
        return _metadataFields->hasRandMetaField();
    }
    double getRandMetaField() const {
        loadLazyMetadata();
        return _metadataFields->getRandMetaField();
    }
    void setRandMetaField(double val) {
        loadLazyMetadata();
        _metadataFields->setRandMetaField(val);
    }

    bool hasSortKeyMetaField() const {
        loadLazyMetadata();
        return _metadataFields->hasSortKeyMetaField();
    }
    BSONObj getSortKeyMetaField() const {
        loadLazyMetadata();
        return _metadataFields->getSortKeyMetaField();
    }
    void setSortKeyMetaField(BSONObj sortKey) {
        loadLazyMetadata();
        _metadataFields->setSortKeyMetaField(sortKey);
    }

    bool hasGeoNearDistance() const {
        loadLazyMetadata();
        return _metadataFields->hasGeoNearDistance();
    }
    double getGeoNearDistance() const {
        loadLazyMetadata();
        return _metadataFields->getGeoNearDistance();
    }
    void setGeoNearDistance(double dist) {
        loadLazyMetadata();
        _metadataFields->setGeoNearDistance(dist);
    }

    bool hasGeoNearPoint() const {
        loadLazyMetadata();
        return _metadataFields->hasGeoNearPoint();
    }
    Value getGeoNearPoint() const {
        loadLazyMetadata();
        return _metadataFields->getGeoNearPoint();
    }
    void setGeoNearPoint(Value point) {
        loadLazyMetadata();
        _metadataFields->setGeoNearPoint(point);
    }

    bool hasSearchScore() const {
        loadLazyMetadata();
        return _metadataFields->hasSearchScore();
    }
    double getSearchScore() const {
        loadLazyMetadata();
        return _metadataFields->getSearchScore();
    }
    void setSearchScore(double score) {
        loadLazyMetadata();
        _metadataFields->setSearchScore(score);
    }

    bool hasSearchHighlights() const {
        loadLazyMetadata();
        return _metadataFields->hasSearchHighlights();
    }
    Value getSearchHighlights() const {
        loadLazyMetadata();
        return _metadataFields->getSearchHighlights();
    }
    void setSearchHighlights(Value highlights) {
        loadLazyMetadata();
        _metadataFields->setSearchHighlights(highlights);
    }

    static unsigned hashKey(StringData name) {
        // TODO consider FNV-1a once we have a better benchmark corpus
        unsigned out;
        MurmurHash3_x86_32(name.rawData(), name.size(), 0, &out);
        return out;
    }

    const ValueElement* begin() const {
        return _firstElement;
    }

    /// Same as lastElement->next() or firstElement() if empty.
    const ValueElement* end() const {
        return _firstElement ? _firstElement->plusBytes(_usedBytes) : nullptr;
    }

    auto stripMetadata() const {
        return _stripMetadata;
    }

    Position constructInCache(const BSONElement& elem);

    auto isModified() const {
        return _modified;
    }
    auto bsonObj() const {
        return _bson;
    }

private:
    /// Returns the position of the named field in the cache or Position()
    Position findFieldInCache(StringData name) const;

    /// Allocates space in _cache. Copies existing data if there is any.
    void alloc(unsigned newSize);

    /// Call after adding field to _cache and increasing _numFields
    void addFieldToHashTable(Position pos);

    // assumes _hashTabMask is (power of two) - 1
    unsigned hashTabBuckets() const {
        return _hashTabMask + 1;
    }
    unsigned hashTabBytes() const {
        return hashTabBuckets() * sizeof(Position);
    }

    /// rehash on buffer growth if load-factor > .5 (attempt to keep lf < 1 when full)
    bool needRehash() const {
        return _numFields * 2 > hashTabBuckets();
    }
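
    // Illustrative example: with HASH_TAB_INIT_SIZE == 8 buckets, needRehash() first returns true
    // once a 5th field has been added (5 * 2 > 8), i.e. when the load factor exceeds 0.5.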

    /// Initialize empty hash table
    void hashTabInit() {
        memset(static_cast<void*>(_hashTab), -1, hashTabBytes());
    }

    unsigned bucketForKey(StringData name) const {
        return hashKey(name) & _hashTabMask;
    }

    /// Adds all fields to the hash table
    void rehash() {
        hashTabInit();
        for (auto it = iteratorCacheOnly(); !it.atEnd(); it.advance())
            addFieldToHashTable(it.position());
    }

    void loadLazyMetadata() const;

    enum {
        HASH_TAB_INIT_SIZE = 8,  // must be power of 2
        HASH_TAB_MIN = 4,        // don't hash fields for docs smaller than this
                                 // set to 1 to always hash
    };

    // _cache layout:
    // -------------------------------------------------------------------------------
    // | ValueElement1 Name1 | ValueElement2 Name2 | ... FREE SPACE ... | Hash Table |
    // -------------------------------------------------------------------------------
    //  ^ _cache and _firstElement point here                           ^
    //                                _cacheEnd and _hashTab point here ^
    //
    //
    // When the buffer grows, the hash table moves to the new end.
    union {
        char* _cache;
        ValueElement* _firstElement;
    };

    union {
        // pointer to "end" of _cache element space and start of hash table (same position)
        char* _cacheEnd;
        Position* _hashTab;  // table lazily initialized once _numFields == HASH_TAB_MIN
    };

    unsigned _usedBytes;    // position where next field would start
    unsigned _numFields;    // this includes removed fields
    unsigned _hashTabMask;  // equal to hashTabBuckets()-1 but used more often

    BSONObj _bson;
    mutable BSONObjIterator _bsonIt;

    mutable std::unique_ptr<MetadataFields> _metadataFields;

    // A storage constructed from a BSON value may contain metadata. When we process the BSON we
    // have to move the metadata into the MetadataFields object. If we know that the BSON does not
    // contain any metadata we can set _stripMetadata to false, which speeds up the iteration.
    bool _stripMetadata{false};

    // This flag is set to true any time the storage returns a mutable field. It is used to optimize
    // conversion to BSON; i.e. if there are no modifications we can return _bson directly.
    // Note that an empty (default) document is marked 'modified': the empty _bson is not owned,
    // but consumers expect toBson() to return owned BSON.
    bool _modified{true};

    // Defined in document.cpp
    static const DocumentStorage kEmptyDoc;

    friend class DocumentStorageIterator;
};
}  // namespace mongo