/**
* Copyright (C) 2018-present MongoDB, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the Server Side Public License, version 1,
* as published by MongoDB, Inc.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Server Side Public License for more details.
*
* You should have received a copy of the Server Side Public License
* along with this program. If not, see
* <http://www.mongodb.com/licensing/server-side-public-license>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the Server Side Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/

#pragma once

#include <set>
#include <string>
#include <vector>
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/collation/collator_interface.h"
#include "mongo/s/chunk.h"
#include "mongo/s/database_version.h"
#include "mongo/s/resharding/type_collection_fields_gen.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/s/shard_version.h"
#include "mongo/s/type_collection_common_types_gen.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/read_through_cache.h"
namespace mongo {
class ChunkManager;
struct PlacementVersionTargetingInfo {
// Indicates whether the shard is stale and thus needs a catalog cache refresh
AtomicWord<bool> isStale{false};
// Max chunk version for the shard
ChunkVersion placementVersion;
PlacementVersionTargetingInfo(const OID& epoch, const Timestamp& timestamp);
};

// Map from a shard to a struct indicating both the max chunk version on that shard and whether the
// shard is currently marked as needing a catalog cache refresh (stale).
using ShardPlacementVersionMap =
stdx::unordered_map<ShardId, PlacementVersionTargetingInfo, ShardId::Hasher>;

/**
 * This class serves as a facade around how the mapping of ranges to chunks is represented. It also
 * provides a simpler, high-level interface for domain-specific operations without exposing the
 * underlying implementation.
 */
class ChunkMap {
// Vector of chunks ordered by max key.
using ChunkVector = std::vector<std::shared_ptr<ChunkInfo>>;
public:
ChunkMap(OID epoch, const Timestamp& timestamp, size_t initialCapacity = 0);
size_t size() const {
return _chunkMap.size();
}
ChunkVersion getVersion() const {
return _collectionPlacementVersion;
}
template <typename Callable>
void forEach(Callable&& handler, const BSONObj& shardKey = BSONObj()) const {
auto it = shardKey.isEmpty() ? _chunkMap.begin() : _findIntersectingChunk(shardKey);
for (; it != _chunkMap.end(); ++it) {
if (!handler(*it))
break;
}
}
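// Illustrative usage (a sketch, not part of this interface; 'chunkMap' and
// 'processChunk' are hypothetical):
//
//   chunkMap.forEach([&](const std::shared_ptr<ChunkInfo>& chunk) {
//       processChunk(*chunk);  // visits chunks in ascending max-key order
//       return true;           // return false to stop iterating early
//   });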
template <typename Callable>
void forEachOverlappingChunk(const BSONObj& min,
const BSONObj& max,
bool isMaxInclusive,
Callable&& handler) const {
const auto bounds = _overlappingBounds(min, max, isMaxInclusive);
for (auto it = bounds.first; it != bounds.second; ++it) {
if (!handler(*it))
break;
}
}
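// Illustrative usage (a sketch; 'min' and 'max' are hypothetical shard-key
// bounds), counting the chunks whose ranges overlap [min, max]:
//
//   size_t numOverlapping = 0;
//   chunkMap.forEachOverlappingChunk(min, max, true /* isMaxInclusive */, [&](const auto&) {
//       ++numOverlapping;
//       return true;
//   });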
ShardPlacementVersionMap constructShardPlacementVersionMap() const;
std::shared_ptr<ChunkInfo> findIntersectingChunk(const BSONObj& shardKey) const;
void appendChunk(const std::shared_ptr<ChunkInfo>& chunk);
ChunkMap createMerged(const std::vector<std::shared_ptr<ChunkInfo>>& changedChunks) const;
BSONObj toBSON() const;
static bool allElementsAreOfType(BSONType type, const BSONObj& obj);
private:
ChunkVector::const_iterator _findIntersectingChunk(const BSONObj& shardKey,
bool isMaxInclusive = true) const;
std::pair<ChunkVector::const_iterator, ChunkVector::const_iterator> _overlappingBounds(
const BSONObj& min, const BSONObj& max, bool isMaxInclusive) const;
ChunkVector _chunkMap;
// Max version across all chunks
ChunkVersion _collectionPlacementVersion;
};

/**
* In-memory representation of the routing table for a single sharded collection at various points
* in time.
*/
class RoutingTableHistory {
RoutingTableHistory(const RoutingTableHistory&) = delete;
RoutingTableHistory& operator=(const RoutingTableHistory&) = delete;
public:
RoutingTableHistory(RoutingTableHistory&&) = default;
RoutingTableHistory& operator=(RoutingTableHistory&&) = default;
/**
* Makes an instance with a routing table for collection "nss", sharded on
* "shardKeyPattern".
*
* "defaultCollator" is the default collation for the collection, "unique" indicates whether
* or not the shard key for each document will be globally unique, and "epoch" is the globally
* unique identifier for this version of the collection.
*
* The "chunks" vector must contain the chunk routing information sorted in ascending order by
* chunk version, and adhere to the requirements of the routing table update algorithm.
*
* The existence of "reshardingFields" inside the optional implies that this field was present
* inside the config.collections entry when refreshing.
*/
static RoutingTableHistory makeNew(
NamespaceString nss,
UUID uuid,
KeyPattern shardKeyPattern,
std::unique_ptr<CollatorInterface> defaultCollator,
bool unique,
OID epoch,
const Timestamp& timestamp,
boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
bool allowMigrations,
const std::vector<ChunkType>& chunks);
/**
* Constructs a new instance with a routing table updated according to the changes described
* in "changedChunks".
*
* The changes in "changedChunks" must be sorted in ascending order by chunk version, and adhere
* to the requirements of the routing table update algorithm.
*
* The existence of timeseriesFields/reshardingFields inside the optional implies that this
* field was present inside the config.collections entry when refreshing. An uninitialized
* timeseriesFields/reshardingFields parameter implies that the field was not present, and will
* clear any currently held timeseries/resharding fields inside the resulting
* RoutingTableHistory.
*/
RoutingTableHistory makeUpdated(
boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
bool allowMigrations,
const std::vector<ChunkType>& changedChunks) const;
const NamespaceString& nss() const {
return _nss;
}
const ShardKeyPattern& getShardKeyPattern() const {
return _shardKeyPattern;
}
const CollatorInterface* getDefaultCollator() const {
return _defaultCollator.get();
}
bool isUnique() const {
return _unique;
}
/**
* Mark the given shard as stale, indicating that requests targeted to this shard (for this
* namespace) need to block on a catalog cache refresh.
*/
void setShardStale(const ShardId& shardId);
/**
* Mark all shards as not stale, indicating that a refresh has happened and requests targeted
* to all shards (for this namespace) do not currently need to block on a catalog cache refresh.
*/
void setAllShardsRefreshed();
ChunkVersion getVersion() const {
return _chunkMap.getVersion();
}
/**
* Retrieves the placement version for the given shard. Will throw a
* ShardInvalidatedForTargeting exception if the shard is marked as stale.
*/
ChunkVersion getVersion(const ShardId& shardId) const;
/**
* Retrieves the placement version for the given shard. Will not throw if the shard is marked as
* stale. Only use when logging the given chunk version -- if the caller must execute logic
* based on the returned version, use getVersion() instead.
*/
ChunkVersion getVersionForLogging(const ShardId& shardId) const;
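// Illustrative sketch of the staleness contract described above ('rt' is a
// hypothetical RoutingTableHistory instance and 'shardA' a shard owning chunks):
//
//   rt.setShardStale(shardA);
//   rt.getVersionForLogging(shardA);  // fine: never throws due to staleness
//   rt.getVersion(shardA);            // throws ShardInvalidatedForTargeting
//   rt.setAllShardsRefreshed();
//   rt.getVersion(shardA);            // succeeds again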
size_t numChunks() const {
return _chunkMap.size();
}
template <typename Callable>
void forEachChunk(Callable&& handler, const BSONObj& shardKey = BSONObj()) const {
_chunkMap.forEach(std::forward<Callable>(handler), shardKey);
}
template <typename Callable>
void forEachOverlappingChunk(const BSONObj& min,
const BSONObj& max,
bool isMaxInclusive,
Callable&& handler) const {
_chunkMap.forEachOverlappingChunk(
min, max, isMaxInclusive, std::forward<Callable>(handler));
}
std::shared_ptr<ChunkInfo> findIntersectingChunk(const BSONObj& shardKey) const {
return _chunkMap.findIntersectingChunk(shardKey);
}
/**
* Returns the ids of all shards on which the collection has any chunks.
*/
void getAllShardIds(std::set<ShardId>* all) const;
/**
* Returns all chunk ranges for the collection.
*/
void getAllChunkRanges(std::set<ChunkRange>* all) const;
/**
* Returns the number of shards on which the collection has any chunks
*/
size_t getNShardsOwningChunks() const {
return _placementVersions.size();
}
/**
* Returns true if, for this shard, the chunks are identical in both chunk managers
*/
bool compatibleWith(const RoutingTableHistory& other, const ShardId& shard) const;
std::string toString() const;
bool uuidMatches(const UUID& uuid) const {
return _uuid == uuid;
}
const UUID& getUUID() const {
return _uuid;
}
const boost::optional<TypeCollectionTimeseriesFields>& getTimeseriesFields() const {
return _timeseriesFields;
}
const boost::optional<TypeCollectionReshardingFields>& getReshardingFields() const {
return _reshardingFields;
}
bool allowMigrations() const {
return _allowMigrations;
}
private:
friend class ChunkManager;
RoutingTableHistory(NamespaceString nss,
UUID uuid,
KeyPattern shardKeyPattern,
std::unique_ptr<CollatorInterface> defaultCollator,
bool unique,
boost::optional<TypeCollectionTimeseriesFields> timeseriesFields,
boost::optional<TypeCollectionReshardingFields> reshardingFields,
bool allowMigrations,
ChunkMap chunkMap);
ChunkVersion _getVersion(const ShardId& shardName, bool throwOnStaleShard) const;
// Namespace to which this routing information corresponds
NamespaceString _nss;
// The UUID of the collection
UUID _uuid;
// The key pattern used to shard the collection
ShardKeyPattern _shardKeyPattern;
// Default collation to use for routing data queries for this collection
std::unique_ptr<CollatorInterface> _defaultCollator;
// Whether the sharding key is unique
bool _unique;
// This information will be valid if the collection is a time-series buckets collection.
boost::optional<TypeCollectionTimeseriesFields> _timeseriesFields;
// The set of fields related to an ongoing resharding operation involving this collection. The
// presence of the type inside the optional indicates that the collection is involved in a
// resharding operation, and that these fields were present in the config.collections entry
// for this collection.
boost::optional<TypeCollectionReshardingFields> _reshardingFields;
bool _allowMigrations;
// Map from the max for each chunk to an entry describing the chunk. The union of all chunks'
// ranges must cover the complete space from [MinKey, MaxKey).
ChunkMap _chunkMap;
// The representation of shards' placement versions and staleness indicators for this namespace.
// If a shard does not exist, it will not have an entry in the map. Note: this declaration must
// not be moved before _chunkMap since it is initialized by using the _chunkMap instance.
ShardPlacementVersionMap _placementVersions;
};

/**
* Constructed to be used exclusively by the CatalogCache as a vector clock (Time) to drive
* CollectionCache's lookups.
*
* The ChunkVersion class contains a timestamp for the collection generation, which resets to 0
* after the collection is dropped or after all chunks are moved off of a shard. Versions from
* before and after such a reset cannot be compared.
*
* This class wraps a ChunkVersion object with a node-local sequence number
* (_epochDisambiguatingSequenceNum) that allows the comparison.
*
* This class should go away once a cluster-wide comparable ChunkVersion is implemented.
*/
class ComparableChunkVersion {
public:
/**
* Creates a ComparableChunkVersion that wraps the given ChunkVersion.
* Each object created through this method will have a local sequence number greater than the
* previously created ones.
*/
static ComparableChunkVersion makeComparableChunkVersion(const ChunkVersion& version);
/**
* Creates a new instance which will artificially be greater than any
* previously created ComparableChunkVersion and smaller than any instance
* created afterwards. Used as a means to cause the collections cache to
* attempt a refresh in situations where causal consistency cannot be
* inferred.
*/
static ComparableChunkVersion makeComparableChunkVersionForForcedRefresh();
/**
* Empty constructor needed by the ReadThroughCache.
*
* Instances created through this constructor will always be less than the ones created through
* the two static constructors, but they do not carry any meaningful value and can only be used
* for comparison purposes.
*/
ComparableChunkVersion() = default;
std::string toString() const;
bool operator==(const ComparableChunkVersion& other) const;
bool operator!=(const ComparableChunkVersion& other) const {
return !(*this == other);
}
/**
* In case the two compared instances have different epochs, the most recently created one will
* be greater, otherwise the comparison will be driven by the major/minor versions of the
* underlying ChunkVersion.
*/
bool operator<(const ComparableChunkVersion& other) const;
bool operator>(const ComparableChunkVersion& other) const {
return other < *this;
}
bool operator<=(const ComparableChunkVersion& other) const {
return !(*this > other);
}
bool operator>=(const ComparableChunkVersion& other) const {
return !(*this < other);
}
private:
friend class CatalogCache;
static AtomicWord<uint64_t> _epochDisambiguatingSequenceNumSource;
static AtomicWord<uint64_t> _forcedRefreshSequenceNumSource;
ComparableChunkVersion(uint64_t forcedRefreshSequenceNum,
boost::optional<ChunkVersion> version,
uint64_t epochDisambiguatingSequenceNum)
: _forcedRefreshSequenceNum(forcedRefreshSequenceNum),
_chunkVersion(std::move(version)),
_epochDisambiguatingSequenceNum(epochDisambiguatingSequenceNum) {}
void setChunkVersion(const ChunkVersion& version);
uint64_t _forcedRefreshSequenceNum{0};
boost::optional<ChunkVersion> _chunkVersion;
// Locally incremented sequence number that allows comparing two collection versions with
// different epochs. Each new ComparableChunkVersion will have a greater sequence number than
// the ones created before.
uint64_t _epochDisambiguatingSequenceNum{0};
};
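
// Illustrative sketch of the ordering semantics ('v1' and 'v2' are
// hypothetical ChunkVersions carrying *different* epochs):
//
//   auto a = ComparableChunkVersion::makeComparableChunkVersion(v1);
//   auto b = ComparableChunkVersion::makeComparableChunkVersion(v2);
//   invariant(a < b);  // epochs differ, so creation order decides
//
//   auto forced = ComparableChunkVersion::makeComparableChunkVersionForForcedRefresh();
//   invariant(b < forced);  // a forced-refresh value exceeds all earlier ones
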
/**
* This intermediate structure is necessary to be able to store UNSHARDED collections in the routing
* table history cache below. The reason is that currently the RoutingTableHistory class only
* supports sharded collections (i.e., collections which have entries in config.collections and
* config.chunks).
*/
struct OptionalRoutingTableHistory {
// UNSHARDED collection constructor
OptionalRoutingTableHistory() = default;
// SHARDED collection constructor
OptionalRoutingTableHistory(std::shared_ptr<RoutingTableHistory> rt) : optRt(std::move(rt)) {}
// If nullptr, the collection is UNSHARDED, otherwise it is SHARDED
std::shared_ptr<RoutingTableHistory> optRt;
};

using RoutingTableHistoryCache =
ReadThroughCache<NamespaceString, OptionalRoutingTableHistory, ComparableChunkVersion>;
using RoutingTableHistoryValueHandle = RoutingTableHistoryCache::ValueHandle;

/**
* Combines a shard, the shard version, and database version that the shard should be using
*/
struct ShardEndpoint {
ShardEndpoint(const ShardId& shardName,
boost::optional<ShardVersion> shardVersionParam,
boost::optional<DatabaseVersion> dbVersionParam);
ShardId shardName;
boost::optional<ShardVersion> shardVersion;
boost::optional<DatabaseVersion> databaseVersion;
};

/**
* Compares shard endpoints in a map.
*/
struct EndpointComp {
bool operator()(const ShardEndpoint* endpointA, const ShardEndpoint* endpointB) const;
};

/**
* Wrapper around a RoutingTableHistory, which pins it to a particular point in time.
*/
class ChunkManager {
public:
ChunkManager(ShardId dbPrimary,
DatabaseVersion dbVersion,
RoutingTableHistoryValueHandle rt,
boost::optional<Timestamp> clusterTime)
: _dbPrimary(std::move(dbPrimary)),
_dbVersion(std::move(dbVersion)),
_rt(std::move(rt)),
_clusterTime(std::move(clusterTime)) {}
// Methods supported on both sharded and unsharded collections
bool isSharded() const {
return bool(_rt->optRt);
}
bool isAtPointInTime() const {
return bool(_clusterTime);
}
/**
* Indicates that this collection must not honour any moveChunk requests, because it is required
* to provide a stable view of its constituent shards.
*/
bool allowMigrations() const;
const ShardId& dbPrimary() const {
return _dbPrimary;
}
const DatabaseVersion& dbVersion() const {
return _dbVersion;
}
int numChunks() const {
return _rt->optRt ? _rt->optRt->numChunks() : 1;
}
std::string toString() const;
// Methods only supported on sharded collections (caller must check isSharded())
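// For example (illustrative sketch; 'cm' is a hypothetical ChunkManager):
//
//   if (cm.isSharded()) {
//       ChunkVersion v = cm.getVersion();  // safe: a routing table is present
//   }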
const ShardKeyPattern& getShardKeyPattern() const {
return _rt->optRt->getShardKeyPattern();
}
const CollatorInterface* getDefaultCollator() const {
return _rt->optRt->getDefaultCollator();
}
bool isUnique() const {
return _rt->optRt->isUnique();
}
ChunkVersion getVersion() const {
return _rt->optRt->getVersion();
}
ChunkVersion getVersion(const ShardId& shardId) const {
return _rt->optRt->getVersion(shardId);
}
ChunkVersion getVersionForLogging(const ShardId& shardId) const {
return _rt->optRt->getVersionForLogging(shardId);
}
template <typename Callable>
void forEachChunk(Callable&& handler, const BSONObj& shardKey = BSONObj()) const {
_rt->optRt->forEachChunk(
[this, handler = std::forward<Callable>(handler)](const auto& chunkInfo) mutable {
if (!handler(Chunk{*chunkInfo, _clusterTime}))
return false;
return true;
},
shardKey);
}
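// Illustrative usage (a sketch): unlike ChunkMap::forEach, the handler here
// receives a Chunk pinned to this ChunkManager's cluster time ('someShard' is
// hypothetical; getShardId() is Chunk's shard accessor from mongo/s/chunk.h):
//
//   cm.forEachChunk([&](const Chunk& chunk) {
//       return chunk.getShardId() != someShard;  // stop at the first chunk on someShard
//   });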
/**
* Returns true if a document with the given "shardKey" is owned by the shard with the given
* "shardId" in this routing table. If "shardKey" is empty returns false. If "shardKey" is not a
* valid shard key, the behaviour is undefined.
*/
bool keyBelongsToShard(const BSONObj& shardKey, const ShardId& shardId) const;
/**
* Returns true if any chunk owned by the shard with the given "shardId" overlaps "range".
*/
bool rangeOverlapsShard(const ChunkRange& range, const ShardId& shardId) const;
/**
* Given a shardKey, returns the first chunk which is owned by shardId and overlaps or sorts
* after that shardKey. If the return value is empty, this means no such chunk exists.
*/
boost::optional<Chunk> getNextChunkOnShard(const BSONObj& shardKey,
const ShardId& shardId) const;
/**
* Given a shard key (or a prefix) that has been extracted from a document, returns the chunk
* that contains that key.
*
* Example: findIntersectingChunk({a : hash('foo')}) locates the chunk for document
* {a: 'foo', b: 'bar'} if the shard key is {a : 'hashed'}.
*
* If 'collation' is empty, we use the collection default collation for targeting.
*
* Throws a DBException with the ShardKeyNotFound code if unable to target a single shard due to
* collation or due to the key not matching the shard key pattern.
*/
Chunk findIntersectingChunk(const BSONObj& shardKey,
const BSONObj& collation,
bool bypassIsFieldHashedCheck = false) const;
/**
* Same as findIntersectingChunk, but assumes the simple collation.
*/
Chunk findIntersectingChunkWithSimpleCollation(const BSONObj& shardKey) const {
return findIntersectingChunk(shardKey, CollationSpec::kSimpleSpec);
}
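// Illustrative sketch: routing a single document for a collection sharded on
// {a: 1} ('cm' is a hypothetical ChunkManager for that collection):
//
//   const BSONObj shardKey = BSON("a" << 42);
//   Chunk chunk = cm.findIntersectingChunkWithSimpleCollation(shardKey);
//   const ShardId& owner = chunk.getShardId();  // shard owning this key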
/**
* Finds the shard id of the shard that owns the chunk minKey belongs to, assuming the simple
* collation because shard keys do not support non-simple collations.
*/
ShardId getMinKeyShardIdWithSimpleCollation() const;
/**
* Returns all shard ids which contain chunks overlapping the range [min, max]. Please note the
* inclusive bounds on both sides (SERVER-20768).
* If 'chunkRanges' is not null, populates it with ChunkRanges that would be targeted by the
* query.
*/
void getShardIdsForRange(const BSONObj& min,
const BSONObj& max,
std::set<ShardId>* shardIds,
std::set<ChunkRange>* chunkRanges = nullptr) const;
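// Illustrative sketch ('cm' as above), targeting the inclusive range
// [{a: 0}, {a: 100}]:
//
//   std::set<ShardId> shardIds;
//   cm.getShardIdsForRange(BSON("a" << 0), BSON("a" << 100), &shardIds);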
/**
* Returns the ids of all shards on which the collection has any chunks.
*/
void getAllShardIds(std::set<ShardId>* all) const {
_rt->optRt->getAllShardIds(all);
}
/**
* Returns the chunk ranges of all shards on which the collection has any chunks.
*/
void getAllChunkRanges(std::set<ChunkRange>* all) const {
_rt->optRt->getAllChunkRanges(all);
}
/**
* Returns the number of shards on which the collection has any chunks
*/
size_t getNShardsOwningChunks() const {
return _rt->optRt->getNShardsOwningChunks();
}
/**
* Constructs a new ChunkManager, which is a view of the underlying routing table at a different
* `clusterTime`.
*/
static ChunkManager makeAtTime(const ChunkManager& cm, Timestamp clusterTime);
/**
* Returns true if, for this shard, the chunks are identical in both chunk managers
*/
bool compatibleWith(const ChunkManager& other, const ShardId& shard) const {
return _rt->optRt->compatibleWith(*other._rt->optRt, shard);
}
bool uuidMatches(const UUID& uuid) const {
return _rt->optRt->uuidMatches(uuid);
}
const UUID& getUUID() const {
return _rt->optRt->getUUID();
}
const NamespaceString& getNss() const {
return _rt->optRt->nss();
}
const boost::optional<TypeCollectionTimeseriesFields>& getTimeseriesFields() const {
return _rt->optRt->getTimeseriesFields();
}
const boost::optional<TypeCollectionReshardingFields>& getReshardingFields() const {
return _rt->optRt->getReshardingFields();
}
const RoutingTableHistory& getRoutingTableHistory_ForTest() const {
return *_rt->optRt;
}
private:
ShardId _dbPrimary;
DatabaseVersion _dbVersion;
RoutingTableHistoryValueHandle _rt;
boost::optional<Timestamp> _clusterTime;
};
} // namespace mongo