# Copyright (C) 2018-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
# as published by MongoDB, Inc.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Server Side Public License for more details.
#
# You should have received a copy of the Server Side Public License
# along with this program. If not, see
# <http://www.mongodb.com/licensing/server-side-public-license>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the Server Side Public License in all respects for
# all of the code used other than as permitted herein. If you modify file(s)
# with this exception, you may extend this exception to your version of the
# file(s), but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version. If you delete this
# exception statement from all source files in the program, then also delete
# it in the license file.
#
# server setParameters for replication
global:
cpp_namespace: "mongo::repl"
cpp_includes:
- "mongo/client/read_preference.h"
imports:
- "mongo/idl/basic_types.idl"
server_parameters:
# From data_replicator_external_state_impl.cpp
initialSyncOplogBuffer:
description: >-
Set this to specify whether to use a collection to buffer the oplog on the
destination server during initial sync to prevent rolling over the oplog.
set_at: startup
cpp_vartype: std::string
cpp_varname: initialSyncOplogBuffer
default: "collection"
initialSyncOplogBufferPeekCacheSize:
description: Set this to specify size of read ahead buffer in the OplogBufferCollection.
set_at: startup
cpp_vartype: int
cpp_varname: initialSyncOplogBufferPeekCacheSize
default: 10000
# From initial_syncer.cpp
numInitialSyncConnectAttempts:
description: The number of attempts to connect to a sync source
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: numInitialSyncConnectAttempts
default: 10
numInitialSyncOplogFindAttempts:
description: The number of attempts to call find on the remote oplog
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: numInitialSyncOplogFindAttempts
default: 3
initialSyncOplogFetcherBatchSize:
description: The batchSize to use for the find/getMore queries called by the OplogFetcher
set_at: startup
cpp_vartype: int
cpp_varname: initialSyncOplogFetcherBatchSize
# 16MB max batch size / 12 byte min doc size * 10 (for good measure) =
# defaultBatchSize to use.
default:
expr: (16 * 1024 * 1024) / 12 * 10
# From oplog_fetcher.cpp
oplogInitialFindMaxSeconds:
description: >-
Number of seconds for the `maxTimeMS` on the initial `find` command.
For the initial 'find' request, we provide a generous timeout, to account for
the potentially slow process of a sync source finding the lastApplied optime
provided in a node's query in its oplog.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: oplogInitialFindMaxSeconds
default: 60
oplogRetriedFindMaxSeconds:
description: Number of seconds for the `maxTimeMS` on any retried `find` commands
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: oplogRetriedFindMaxSeconds
default: 2
oplogFetcherUsesExhaust:
description: >-
Whether to use the "exhaust cursor" feature when fetching oplog entries from the sync
source.
set_at: startup
cpp_vartype: bool
cpp_varname: oplogFetcherUsesExhaust
default: true
oplogBatchDelayMillis:
description: >-
How long, in milliseconds, to wait for more data when an oplog application batch is
not full.
set_at: startup
cpp_vartype: int
cpp_varname: oplogBatchDelayMillis
default: 0
validator:
gte: 0
# From bgsync.cpp
bgSyncOplogFetcherBatchSize:
description: The batchSize to use for the find/getMore queries called by the OplogFetcher
set_at: startup
cpp_vartype: int
cpp_varname: bgSyncOplogFetcherBatchSize
# 16MB max batch size / 12 byte min doc size * 10 (for good measure) =
# defaultBatchSize to use.
default:
expr: (16 * 1024 * 1024) / 12 * 10
rollbackRemoteOplogQueryBatchSize:
description: >-
The batchSize to use for the find/getMore queries called by the rollback
common point resolver. A batchSize of 0 means that the 'find' and 'getMore'
commands will be given no batchSize. We set the default to 2000 to prevent
the sync source from having to read too much data at once, and reduce the
chance of a socket timeout.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: rollbackRemoteOplogQueryBatchSize
# We choose 2000 for (10 minute timeout) * (60 sec / min) * (50 MB / second) /
# (16 MB / document).
default: 2000
forceRollbackViaRefetch:
description: >-
If 'forceRollbackViaRefetch' is true, always perform rollbacks via the
refetch algorithm, even if the storage engine supports rollback via recover
to timestamp.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<bool>
cpp_varname: forceRollbackViaRefetch
default: false
# From noop_writer.cpp
writePeriodicNoops:
description: Sets whether to write periodic noops or not
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<bool>
cpp_varname: writePeriodicNoops
default: true
collectionClonerUsesExhaust:
description: Whether to use the "exhaust cursor" feature when retrieving collection data.
set_at: startup
cpp_vartype: bool
cpp_varname: collectionClonerUsesExhaust
default: true
# From collection_bulk_loader_impl.cpp
collectionBulkLoaderBatchSizeInBytes:
description: >-
Limit for the number of bytes of data inserted per storage transaction
(WriteUnitOfWork) by collectionBulkLoader during initial sync collection cloning
set_at: startup
cpp_vartype: int
cpp_varname: collectionBulkLoaderBatchSizeInBytes
default:
expr: 256 * 1024
# From database_cloner.cpp
collectionClonerBatchSize:
description: >-
The batch size (number of documents) to use for the queries in the
CollectionCloner. Default of '0' means the limit is the number of documents
that will fit in a single BSON object.
set_at: startup
cpp_vartype: int
cpp_varname: collectionClonerBatchSize
default: 0
validator:
gte: 0
# From replication_coordinator_external_state_impl.cpp
oplogFetcherSteadyStateMaxFetcherRestarts:
description: >-
Set this to specify the maximum number of times the oplog fetcher will
consecutively restart the oplog tailing query on non-cancellation errors
during steady state replication.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: oplogFetcherSteadyStateMaxFetcherRestarts
default: 1
validator:
gte: 0
oplogFetcherInitialSyncMaxFetcherRestarts:
description: >-
Set this to specify the maximum number of times the oplog fetcher will
consecutively restart the oplog tailing query on non-cancellation errors
during initial sync. By default we provide a generous amount of restarts
to avoid potentially restarting an entire initial sync from scratch.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: oplogFetcherInitialSyncMaxFetcherRestarts
default: 10
validator:
gte: 0
# From oplog_applier.cpp
replWriterThreadCount:
description: The number of threads in the thread pool used to apply the oplog
set_at: startup
cpp_vartype: int
cpp_varname: replWriterThreadCount
default: 16
validator:
gte: 1
lte: 256
replWriterMinThreadCount:
description: The minimum number of threads in the thread pool used to apply the oplog
set_at: startup
cpp_vartype: int
cpp_varname: replWriterMinThreadCount
default: 0
validator:
gte: 0
lte: 256
replBatchLimitOperations:
description: The maximum number of operations to apply in a single batch
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: replBatchLimitOperations
default:
expr: 5 * 1000
validator:
gte: 1
lte:
expr: 1000 * 1000
replBatchLimitBytes:
description: The maximum oplog application batch size in bytes
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: replBatchLimitBytes
default:
expr: 100 * 1024 * 1024
validator:
gte:
expr: 16 * 1024 * 1024
lte:
expr: 100 * 1024 * 1024
# From tenant_oplog_applier.cpp
tenantApplierBatchSizeBytes:
description: The maximum tenant oplog applier batch size in bytes.
        set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: tenantApplierBatchSizeBytes
default:
expr: 16 * 1024 * 1024
validator:
gte:
expr: 16 * 1024 * 1024
lte:
expr: 100 * 1024 * 1024
tenantApplierBatchSizeOps:
description: The maximum number of operations in a tenant oplog applier batch.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: tenantApplierBatchSizeOps
default: 500
validator:
gte: 1
lte:
expr: 100 * 1000
minOplogEntriesPerThread:
description: >-
The minimum number of operations allotted to a tenant oplog applier worker thread.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: minOplogEntriesPerThread
default: 16
validator:
gte: 1
lte: 32
tenantApplierThreadCount:
description: >-
The number of threads in the tenant migration writer pool used to apply operations.
set_at: startup
cpp_vartype: int
cpp_varname: tenantApplierThreadCount
default: 5
validator:
gte: 1
lte: 256
recoverFromOplogAsStandalone:
description: Tells the server to perform replication recovery as a standalone.
set_at: startup
cpp_vartype: bool
cpp_varname: recoverFromOplogAsStandalone
default: false
maxSyncSourceLagSecs:
description: ''
set_at: startup
cpp_vartype: int
cpp_varname: maxSyncSourceLagSecs
default: 30
validator:
gt: 0
replElectionTimeoutOffsetLimitFraction:
description: ''
set_at: startup
cpp_vartype: double
cpp_varname: replElectionTimeoutOffsetLimitFraction
default: 0.15
validator:
gt: 0.01
# New parameters since this file was created, not taken from elsewhere.
initialSyncTransientErrorRetryPeriodSeconds:
description: >-
The amount of time to continue retrying transient errors during initial sync before
declaring the attempt failed.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: initialSyncTransientErrorRetryPeriodSeconds
default:
expr: 24 * 60 * 60
validator:
gte: 0
oplogNetworkTimeoutBufferSeconds:
description: >-
Number of seconds to add to the `find` and `getMore` timeouts for oplog fetching to
calculate the network timeout for the requests.
        set_at: startup
cpp_vartype: AtomicWord<int>
cpp_varname: oplogNetworkTimeoutBufferSeconds
default: 5
validator:
gte: 0
oplogApplicationEnforcesSteadyStateConstraints:
description: >-
Whether or not secondary oplog application enforces (by fassert) consistency
constraints that apply if an oplog entry is to be applied exactly once and in order.
set_at: startup
cpp_vartype: bool
cpp_varname: oplogApplicationEnforcesSteadyStateConstraints
default: false
initialSyncSourceReadPreference:
description: >-
Set this to specify how the sync source for initial sync is determined.
Valid options are: nearest, primary, primaryPreferred, secondary,
and secondaryPreferred.
set_at: startup
cpp_vartype: std::string
cpp_varname: initialSyncSourceReadPreference
# When the default is used, if chaining is disabled in the config then readPreference is
# 'primary'. Otherwise, if the node is a voting node, readPreference is 'primaryPreferred'
# and if it is not, readPreference is 'nearest'.
default: ""
validator: { callback: 'validateReadPreferenceMode' }
changeSyncSourceThresholdMillis:
description: >-
Threshold between ping times that are considered as coming from the same data center
and ping times considered as coming from a different data center. Used to determine
if the node should attempt to change sync sources because another node is significantly
closer than its current sync source.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<long long>
cpp_varname: changeSyncSourceThresholdMillis
default: 5
validator:
gte: 0
maxNumSyncSourceChangesPerHour:
description: >-
The number of sync source changes that can happen per hour before the node temporarily
turns off reevaluating its sync source. This will only affect sync source changes while
a node has a valid sync source.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: maxNumSyncSourceChangesPerHour
default: 3
validator:
gt: 0
enableOverrideClusterChainingSetting:
description: >-
When enabled, allows a node to override the cluster-wide chainingAllowed setting.
If chaining is disabled in the replica set, enabling this parameter allows the node
to chain regardless.
        set_at: startup
cpp_vartype: AtomicWord<bool>
cpp_varname: enableOverrideClusterChainingSetting
default: false
disableSplitHorizonIPCheck:
description: >-
If true, disable check for IP addresses in split horizon configurations. As per the
definition of SNI laid out in RFC6066, literal IP addresses are not allowed as server names.
set_at: startup
cpp_vartype: bool
cpp_varname: disableSplitHorizonIPCheck
default: false
tenantMigrationGarbageCollectionDelayMS:
description: >-
The amount of time in milliseconds that the donor or recipient should wait before
removing the migration state document after receiving donorForgetMigration or
recipientForgetMigration.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: tenantMigrationGarbageCollectionDelayMS
default:
expr: 15 * 60 * 1000
tenantMigrationExternalKeysRemovalBufferSecs:
description: >-
The amount of time in seconds that the donor or recipient should wait before
removing the cluster time keys cloned from each other after receiving
donorForgetMigration or recipientForgetMigration
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: tenantMigrationExternalKeysRemovalBufferSecs
default:
expr: 60 * 60 * 24 # 24 hours
tenantMigrationOplogBufferPeekCacheSize:
description: >-
Set this to specify size of read ahead buffer in the OplogBufferCollection for tenant
migrations.
set_at: startup
cpp_vartype: int
cpp_varname: tenantMigrationOplogBufferPeekCacheSize
default: 10000
tenantMigrationOplogFetcherBatchSize:
description: >-
The batchSize to use for the find/getMore queries called by the OplogFetcher for
tenant migrations.
set_at: startup
cpp_vartype: int
cpp_varname: tenantMigrationOplogFetcherBatchSize
# 16MB max batch size / 12 byte min doc size * 10 (for good measure) =
# defaultBatchSize to use.
default:
expr: (16 * 1024 * 1024) / 12 * 10
maxTenantMigrationRecipientThreadPoolSize:
description: >-
The maximum number of threads in the tenant migration recipient's thread pool.
set_at: startup
cpp_vartype: int
cpp_varname: maxTenantMigrationRecipientThreadPoolSize
# Each migration needs at least 2 dedicated threads for oplog fetching and data cloning plus
# 1 thread for other async jobs. To support 50 concurrent migrations on the recipient, we
# set the thread pool size to 128.
default: 128
validator:
gte: 1
maxTenantMigrationDonorServiceThreadPoolSize:
description: >-
The maximum number of threads in the tenant migration donor service's thread pool.
set_at: startup
cpp_vartype: int
cpp_varname: maxTenantMigrationDonorServiceThreadPoolSize
default: 8
validator:
gte: 1
shardSplitTimeoutMS:
description: >-
Period of time, in milliseconds, after which a shard split should be interrupted.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: shardSplitTimeoutMS
default: 3600000
validator:
gte: 1
tenantMigrationBlockingStateTimeoutMS:
description: >-
Period of time, in milliseconds, after which the blocking state should be interrupted.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: tenantMigrationBlockingStateTimeoutMS
# TODO: SERVER-53571 determine the best default timeout.
default: 3600000
validator:
gte: 1
# TODO (SERVER-54085): Remove server parameter tenantMigrationDisableX509Auth.
tenantMigrationDisableX509Auth:
description: >-
            Disable x509 authentication for all incoming and outgoing tenant migrations.
        set_at: startup
cpp_vartype: bool
cpp_varname: tenantMigrationDisableX509Auth
default: false
tenantMigrationExcludeDonorHostTimeoutMS:
description: >-
Period of time, in milliseconds, that a donor host should be excluded for.
set_at: startup
cpp_vartype: int
cpp_varname: tenantMigrationExcludeDonorHostTimeoutMS
default: 60000
validator:
gte: 1
enableDefaultWriteConcernUpdatesForInitiate:
description: >-
When enabled, allow the default write concern to be updated in the replica set
initiate process. This parameter should only ever be enabled in our testing
infrastructure, as using it in production may lead to undefined behavior.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<bool>
cpp_varname: enableDefaultWriteConcernUpdatesForInitiate
default: false
startupRecoveryForRestore:
description: >-
When set, do startup recovery in such a way that the history of the recovered
operations is not preserved. At the end of startup recovery, snapshot reads before
the recovered top of oplog will not be possible. Reduces cache pressure when
recovering many oplog entries, as when restoring from backup in some scenarios.
set_at: startup
cpp_vartype: bool
cpp_varname: startupRecoveryForRestore
default: false
storeFindAndModifyImagesInSideCollection:
description: >-
Determines where document images for retryable find and modifies are to be stored.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<bool>
cpp_varname: gStoreFindAndModifyImagesInSideCollection
default: true
enableReconfigRollbackCommittedWritesCheck:
description: >-
Enables the reconfig check to ensure that committed writes cannot be rolled back in
sets with arbiters. Enabled by default. Test-only.
test_only: true
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<bool>
cpp_varname: enableReconfigRollbackCommittedWritesCheck
default: true
initialSyncMethod:
description: >-
Specifies which method of initial sync to use. Valid options are: fileCopyBased,
logical.
set_at: startup
cpp_vartype: std::string
cpp_varname: initialSyncMethod
default: "logical"
fileBasedInitialSyncMaxLagSec:
description: >-
Specifies the max lag in seconds between the syncing node and the sync source to mark
the file copy based initial sync done successfully.
set_at: startup
cpp_vartype: int
cpp_varname: fileBasedInitialSyncMaxLagSec
default:
expr: 5 * 60
validator:
gte: 0
fileBasedInitialSyncMaxCyclesWithoutProgress:
description: >-
Specifies the max number of cycles that file copy based initial sync runs trying to
clone updates while the lag between the syncing node and the sync source is higher than
fileBasedInitialSyncMaxLagSec.
set_at: startup
cpp_vartype: int
cpp_varname: fileBasedInitialSyncMaxCyclesWithoutProgress
default: 3
validator:
gte: 1
fileBasedInitialSyncExtendCursorTimeoutMS:
description: >-
            Period of time, in milliseconds, that the syncing node may wait for the sync source to
full oplog history up to the backup point in time.
set_at: startup
cpp_vartype: int
cpp_varname: fileBasedInitialSyncExtendCursorTimeoutMS
default:
expr: 180 * 1000
validator:
gte: 1
fassertOnLockTimeoutForStepUpDown:
description: >-
Time limit threshold to fassert if getting RSTL times out when executing a stepdown or stepup command.
Set to 0 to disable.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: fassertOnLockTimeoutForStepUpDown
# We set the default to 30 seconds explicitly to match sharding's kDefaultConfigCommandTimeout.
default: 30
validator:
gte: 0
allowMultipleArbiters:
description: >-
Allow multiple arbiters. Default is false as it can put data at risk by allowing a
replica set to accept writes without a sufficient number of secondaries being available
for data replication.
For example, a PSSAA replica set (primary, 2 secondaries, 2 arbiters) would still be
available for writes after the two secondaries fail.
In that case, only one copy of the data, on the primary, would be actively updated.
The replica set would have a majority of nodes available for election purposes,
but no active replication until at least one healthy secondary is available.
set_at: startup
cpp_vartype: bool
cpp_varname: allowMultipleArbiters
default: false
shardSplitGarbageCollectionDelayMS:
description: >-
The amount of time in milliseconds that the donor should wait before
removing the shard split state document after receiving forgetShardSplit.
set_at: [ startup, runtime ]
cpp_vartype: AtomicWord<int>
cpp_varname: shardSplitGarbageCollectionDelayMS
default:
expr: 15 * 60 * 1000
feature_flags:
featureFlagRetryableFindAndModify:
description: >-
When enabled, storeFindAndModifyImagesInOplog=false will change the location of any
document images for retryable find and modifies.
cpp_varname: feature_flags::gFeatureFlagRetryableFindAndModify
default: true
        version: "5.1"
featureFlagFileCopyBasedInitialSync:
description: >-
When enabled, file copy based initial sync will be supported in Enterprise Server.
cpp_varname: feature_flags::gFileCopyBasedInitialSync
default: true
        version: "5.2"
featureFlagShardMerge:
description: When enabled, multitenant migration uses the "shard merge" protocol.
cpp_varname: feature_flags::gShardMerge
default: false
featureFlagShardSplit:
description: When enabled, multitenant migration can use the shard split commands.
cpp_varname: feature_flags::gShardSplit
default: false