summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHenrik Edin <henrik.edin@mongodb.com>2020-05-19 08:49:26 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-07-17 15:52:43 +0000
commit00fbc981646d9e6ebc391f45a31f4070d4466753 (patch)
tree226e1cdfea70be2b4b71d872350e7fd8b82436a9
parent9238911d0a46f26419ecdbec4293457b9e1a891d (diff)
downloadmongo-00fbc981646d9e6ebc391f45a31f4070d4466753.tar.gz
SERVER-38987 Replace ephemeralForTest storage engine with biggie implementation
ephemeralForTest is now a document level locking engine unittests instantiate the oplog as it is required with doc-level locking engines Added a 'incompatible_with_eft' tag for tests that don't work with this engine for different reasons. Many concurrency suites are disabled due to excessive memory usage
-rw-r--r--etc/evergreen.yml46
-rw-r--r--jstests/auth/arbiter.js3
-rw-r--r--jstests/concurrency/fsm_workloads/agg_group_external.js2
-rw-r--r--jstests/concurrency/fsm_workloads/agg_sort_external.js3
-rw-r--r--jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js3
-rw-r--r--jstests/concurrency/fsm_workloads/create_index_background_wildcard.js3
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js3
-rw-r--r--jstests/concurrency/fsm_workloads/reindex_background.js3
-rw-r--r--jstests/noPassthrough/geo_mnypts_plus_fields.js2
-rw-r--r--jstests/noPassthroughWithMongod/bench_test_crud_commands.js3
-rw-r--r--jstests/noPassthroughWithMongod/dup_bgindex.js2
-rw-r--r--jstests/noPassthroughWithMongod/geo_mnypts.js2
-rw-r--r--jstests/noPassthroughWithMongod/geo_polygon.js3
-rw-r--r--jstests/noPassthroughWithMongod/indexbg_interrupts.js4
-rw-r--r--jstests/parallel/basic.js3
-rw-r--r--jstests/parallel/basicPlus.js2
-rw-r--r--jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js2
-rw-r--r--jstests/replsets/initial_sync1.js3
-rw-r--r--jstests/replsets/remove1.js3
-rw-r--r--jstests/slow1/conc_update.js3
-rw-r--r--src/mongo/SConscript3
-rw-r--r--src/mongo/db/auth/auth_op_observer_test.cpp1
-rw-r--r--src/mongo/db/catalog/capped_utils_test.cpp6
-rw-r--r--src/mongo/db/catalog/collection_validation_test.cpp10
-rw-r--r--src/mongo/db/catalog/create_collection_test.cpp6
-rw-r--r--src/mongo/db/catalog/rename_collection_test.cpp1
-rw-r--r--src/mongo/db/commands/mr_test.cpp4
-rw-r--r--src/mongo/db/concurrency/d_concurrency_test.cpp11
-rw-r--r--src/mongo/db/free_mon/free_mon_controller_test.cpp3
-rw-r--r--src/mongo/db/free_mon/free_mon_storage_test.cpp3
-rw-r--r--src/mongo/db/op_observer_impl_test.cpp1
-rw-r--r--src/mongo/db/repl/SConscript1
-rw-r--r--src/mongo/db/repl/mock_repl_coord_server_fixture.cpp2
-rw-r--r--src/mongo/db/repl/replication_consistency_markers_impl_test.cpp26
-rw-r--r--src/mongo/db/repl/replication_recovery_test.cpp23
-rw-r--r--src/mongo/db/s/session_catalog_migration_destination_test.cpp2
-rw-r--r--src/mongo/db/session_catalog_mongod_test.cpp3
-rw-r--r--src/mongo/db/storage/SConscript4
-rw-r--r--src/mongo/db/storage/biggie/SConscript72
-rw-r--r--src/mongo/db/storage/biggie/biggie_init.cpp97
-rw-r--r--src/mongo/db/storage/biggie/biggie_record_store.cpp617
-rw-r--r--src/mongo/db/storage/biggie/biggie_record_store.h233
-rw-r--r--src/mongo/db/storage/biggie/biggie_record_store_test.cpp104
-rw-r--r--src/mongo/db/storage/biggie/biggie_recovery_unit.cpp144
-rw-r--r--src/mongo/db/storage/biggie/biggie_recovery_unit.h98
-rw-r--r--src/mongo/db/storage/devnull/SConscript4
-rw-r--r--src/mongo/db/storage/devnull/devnull_kv_engine.cpp2
-rw-r--r--src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp593
-rw-r--r--src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h189
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/SConscript99
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp657
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.h52
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp93
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp139
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h135
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp76
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp40
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.cpp (renamed from src/mongo/db/storage/biggie/biggie_kv_engine.cpp)8
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h (renamed from src/mongo/db/storage/biggie/biggie_kv_engine.h)18
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine_test.cpp (renamed from src/mongo/db/storage/biggie/biggie_kv_engine_test.cpp)16
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h (renamed from src/mongo/db/storage/biggie/store.h)4
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store_concurrent_test.cpp (renamed from src/mongo/db/storage/biggie/radix_store_concurrent_test.cpp)6
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store_test.cpp (renamed from src/mongo/db/storage/biggie/store_test.cpp)6
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp901
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h213
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp56
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp133
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h65
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit_test.cpp (renamed from src/mongo/db/storage/biggie/biggie_recovery_unit_test.cpp)18
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_server_status.cpp (renamed from src/mongo/db/storage/biggie/biggie_server_status.cpp)24
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_server_status.h (renamed from src/mongo/db/storage/biggie/biggie_server_status.h)8
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp (renamed from src/mongo/db/storage/biggie/biggie_sorted_impl.cpp)12
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.h (renamed from src/mongo/db/storage/biggie/biggie_sorted_impl.h)6
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl_test.cpp (renamed from src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp)30
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp (renamed from src/mongo/db/storage/biggie/biggie_visibility_manager.cpp)8
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.h (renamed from src/mongo/db/storage/biggie/biggie_visibility_manager.h)4
-rw-r--r--src/mongo/db/storage/kv/storage_engine_test.cpp51
-rw-r--r--src/mongo/db/storage/storage_engine_test_fixture.h9
-rw-r--r--src/mongo/db/storage/storage_repair_observer_test.cpp6
-rw-r--r--src/mongo/dbtests/SConscript1
80 files changed, 1858 insertions, 3397 deletions
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 72f8c7f1180..3a254215e1f 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -11743,20 +11743,6 @@ buildvariants:
# storage engine buildvariants #
################################
-- name: rhel-62-64-bit-biggie
- display_name: RHEL 6.2 (Biggie)
- run_on:
- - rhel62-small
- expansions:
- test_flags: --storageEngine=biggie --excludeWithAnyTags=requires_persistence,requires_journaling,uses_transactions,requires_wiredtiger,requires_snapshot_read
- compile_flags: MONGO_DISTMOD=rhel62 -j$(grep -c ^processor /proc/cpuinfo) --dbg=off --opt=on --variables-files=etc/scons/mongodbtoolchain_v3_gcc.vars
- scons_cache_scope: shared
- tasks:
- - name: compile_all_run_unittests_TG
- distros:
- - rhel62-large
- - name: jsCore
-
- name: enterprise-rhel-62-benchmarks
display_name: Enterprise RHEL 6.2 (Benchmarks)
modules:
@@ -11837,7 +11823,7 @@ buildvariants:
- name: linux-64-ephemeralForTest
display_name: Linux (ephemeralForTest)
run_on:
- - rhel62-small
+ - rhel62-large
expansions:
# Transactions are not explicitly supported on the ephemeralForTest storage engine.
# Speculative majority reads are currently only allowed for change streams, which are only supported on WiredTiger.
@@ -11846,7 +11832,7 @@ buildvariants:
# correctly after rollbackViaRefetch.
test_flags: >-
--storageEngine=ephemeralForTest
- --excludeWithAnyTags=requires_persistence,requires_fsync,SERVER-21420,SERVER-21658,requires_journaling,requires_wiredtiger,uses_transactions,requires_document_locking,uses_speculative_majority,requires_snapshot_read,requires_majority_read_concern,uses_change_streams,requires_sharding
+ --excludeWithAnyTags=requires_persistence,requires_fsync,SERVER-21420,SERVER-21658,requires_journaling,requires_wiredtiger,uses_transactions,requires_document_locking,uses_speculative_majority,requires_snapshot_read,requires_majority_read_concern,uses_change_streams,requires_sharding,incompatible_with_eft
--mongodSetParameters="{oplogApplicationEnforcesSteadyStateConstraints: false}"
compile_flags: -j$(grep -c ^processor /proc/cpuinfo) --dbg=off --opt=on --variables-files=etc/scons/mongodbtoolchain_v3_gcc.vars
multiversion_platform: rhel62
@@ -11854,26 +11840,17 @@ buildvariants:
scons_cache_scope: shared
tasks:
- name: compile_all_run_unittests_TG
- distros:
- - rhel62-large
- name: .aggfuzzer .common
- name: aggregation
- name: .auth !.multiversion !.audit !.sharding
- name: .misc_js
- name: concurrency
- distros:
- - rhel62-large # Some workloads require a lot of memory, use a bigger machine for this suite.
- - name: concurrency_replication
- - name: concurrency_replication_causal_consistency
- distros:
- - rhel62-large
- - name: concurrency_simultaneous
- - name: concurrency_simultaneous_replication
- distros:
- - rhel62-large
+ # SERVER-36709: Disabled due to excessive memory usage
+ # concurrency_replication
+ # concurrency_replication_causal_consistency
+ # concurrency_simultaneous
+ # concurrency_simultaneous_replication
- name: .integration !.audit
- distros:
- - rhel62-large
- name: .jscore .common !.txns !.decimal
- name: jsCore_op_query
- name: .jstestfuzz .common
@@ -11881,7 +11858,8 @@ buildvariants:
- name: .read_write_concern .linearize
- name: replica_sets
- name: .replica_sets .common
- - name: rollback_fuzzer_gen
+ # SERVER-49428: Disabled due to writeConcernMajorityJournalDefault is not off
+ # rollback_fuzzer_gen
- name: .updatefuzzer
- name: enterprise-rhel-71-ppc64le-inmem
@@ -12172,7 +12150,7 @@ buildvariants:
- name: libfuzzertests_TG
- name: enterprise-ubuntu2004-debug-tsan
- display_name: ~ TSAN Enterprise Ubuntu 20.04 DEBUG (Biggie)
+ display_name: ~ TSAN Enterprise Ubuntu 20.04 DEBUG (ephemeralForTest)
modules:
- enterprise
run_on:
@@ -12184,9 +12162,9 @@ buildvariants:
variant_path_suffix: /opt/mongodbtoolchain/v3/bin
lang_environment: LANG=C
san_options: TSAN_OPTIONS="die_after_fork=0:suppressions=etc/tsan.suppressions:halt_on_error=1"
- # TODO: Remove some of the excluded tags when the Biggie storage engine is
+ # TODO: Remove some of the excluded tags when the ephemeralForTest storage engine is
# further along in development: https://jira.mongodb.org/browse/SERVER-48325
- test_flags: --storageEngine=biggie --excludeWithAnyTags=requires_persistence,requires_journaling,uses_transactions,requires_wiredtiger,requires_snapshot_read
+ test_flags: --storageEngine=ephemeralForTest --excludeWithAnyTags=requires_persistence,requires_journaling,uses_transactions,requires_wiredtiger,requires_snapshot_read
compile_flags: --variables-files=etc/scons/mongodbtoolchain_v3_clang.vars --dbg=on --opt=on --allocator=system --sanitize=thread --ssl --enable-free-mon=on --use-libunwind=off -j$(grep -c ^processor /proc/cpuinfo)
# Avoid starting too many mongod's under TSAN build.
resmoke_jobs_factor: 0.3
diff --git a/jstests/auth/arbiter.js b/jstests/auth/arbiter.js
index 1254172dd88..8ab1cc322f6 100644
--- a/jstests/auth/arbiter.js
+++ b/jstests/auth/arbiter.js
@@ -1,6 +1,7 @@
// Certain commands should be run-able from arbiters under localhost, but not from
// any other nodes in the replset.
-// @tags: [requires_replication]
+// SERVER-48314: Disabled for ephemeralForTest due to lacking timestamp support
+// @tags: [requires_replication, incompatible_with_eft]
(function() {
diff --git a/jstests/concurrency/fsm_workloads/agg_group_external.js b/jstests/concurrency/fsm_workloads/agg_group_external.js
index adb7a787e20..5eab0316463 100644
--- a/jstests/concurrency/fsm_workloads/agg_group_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_group_external.js
@@ -7,6 +7,8 @@
*
* The data passed to the $group is greater than 100MB, which should force
* disk to be used.
+ * SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+ * @tags: [incompatible_with_eft]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
diff --git a/jstests/concurrency/fsm_workloads/agg_sort_external.js b/jstests/concurrency/fsm_workloads/agg_sort_external.js
index b8cbad826bb..1fac22dbaa3 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort_external.js
@@ -7,6 +7,9 @@
* by a $sort on a field containing a random float.
*
* The data returned by the $match is greater than 100MB, which should force an external sort.
+ *
+ * SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+ * @tags: [incompatible_with_eft]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
index c1f0da117f2..b16ac3d25a3 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
@@ -5,7 +5,8 @@
*
* Creates multiple unique background indexes in parallel, on capped collections.
*
- * @tags: [creates_background_indexes, requires_capped]
+ * SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+ * @tags: [creates_background_indexes, requires_capped, incompatible_with_eft]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js b/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js
index 46f37adb3ee..b370b2f3d99 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js
@@ -3,7 +3,8 @@
/**
* Executes the create_index_background.js workload, but with a wildcard index.
*
- * @tags: [creates_background_indexes]
+ * SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+ * @tags: [creates_background_indexes, incompatible_with_eft]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // For extendWorkload.
load('jstests/concurrency/fsm_workloads/create_index_background.js'); // For $config.
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
index 981568904ad..38718206bae 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
@@ -12,6 +12,9 @@
* trying to remove the same document from the queue.
*
* This workload was designed to reproduce SERVER-21434.
+ *
+ * SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+ * @tags: [incompatible_with_eft]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
diff --git a/jstests/concurrency/fsm_workloads/reindex_background.js b/jstests/concurrency/fsm_workloads/reindex_background.js
index 575cf89400e..6d7fa0c66d2 100644
--- a/jstests/concurrency/fsm_workloads/reindex_background.js
+++ b/jstests/concurrency/fsm_workloads/reindex_background.js
@@ -8,7 +8,8 @@
* that because indexes are initially built in the background, reindexing is also done in the
* background.
*
- * @tags: [SERVER-40561, creates_background_indexes]
+ * SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+ * @tags: [SERVER-40561, creates_background_indexes, incompatible_with_eft]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js
index 467c46e3698..c3e8aa5d316 100644
--- a/jstests/noPassthrough/geo_mnypts_plus_fields.js
+++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js
@@ -1,4 +1,6 @@
// Test sanity of geo queries with a lot of points
+// SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+// @tags: [incompatible_with_eft]
(function() {
"use strict";
diff --git a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
index 24b7f8858ef..d9649b5b0f8 100644
--- a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
+++ b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
@@ -1,4 +1,7 @@
// Tests the "writeCmd" and "readCmd" options to benchRun().
+// SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+// @tags: [incompatible_with_eft]
+
(function() {
"use strict";
diff --git a/jstests/noPassthroughWithMongod/dup_bgindex.js b/jstests/noPassthroughWithMongod/dup_bgindex.js
index 02ac4bf7870..b5748e3327c 100644
--- a/jstests/noPassthroughWithMongod/dup_bgindex.js
+++ b/jstests/noPassthroughWithMongod/dup_bgindex.js
@@ -1,4 +1,6 @@
// Try to create two identical indexes, via background. Shouldn't be allowed by the server.
+// SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+// @tags: [incompatible_with_eft]
(function() {
var t = db.duplIndexTest;
t.drop();
diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js
index d3ae714b69b..53e19e24eba 100644
--- a/jstests/noPassthroughWithMongod/geo_mnypts.js
+++ b/jstests/noPassthroughWithMongod/geo_mnypts.js
@@ -1,4 +1,6 @@
// Test sanity of geo queries with a lot of points
+// SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+// @tags: [incompatible_with_eft]
var coll = db.testMnyPts;
coll.drop();
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index ce7f9ebf67c..80ff03fc63a 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -1,5 +1,6 @@
//
-// @tags: [SERVER-40561]
+// SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+// @tags: [SERVER-40561, incompatible_with_eft]
//
t = db.geo_polygon4;
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
index 218002b0cc6..2f0694547c0 100644
--- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js
+++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
@@ -6,7 +6,9 @@
* and tries to perform another task in parallel while the background index task is
* active. The problem is that this is timing dependent and the current test setup
* tries to achieve this by inserting insane amount of documents.
- * @tags: [requires_replication]
+ *
+ * SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+ * @tags: [requires_replication, incompatible_with_eft]
*/
/**
diff --git a/jstests/parallel/basic.js b/jstests/parallel/basic.js
index 8ea755a3307..a90413dd4ba 100644
--- a/jstests/parallel/basic.js
+++ b/jstests/parallel/basic.js
@@ -1,4 +1,7 @@
// perform basic js tests in parallel
+// SERVER-49674: Disabled for ephemeralForTest
+// @tags: [incompatible_with_eft]
+
load('jstests/libs/parallelTester.js');
Random.setRandomSeed();
diff --git a/jstests/parallel/basicPlus.js b/jstests/parallel/basicPlus.js
index 44faf5c439b..7ed7cb82747 100644
--- a/jstests/parallel/basicPlus.js
+++ b/jstests/parallel/basicPlus.js
@@ -1,4 +1,6 @@
// perform basic js tests in parallel & some other tasks as well
+// SERVER-49673: Incompatible with ephemeralForTest
+// @tags: [incompatible_with_eft]
load('jstests/libs/parallelTester.js');
var c = db.jstests_parallel_basicPlus;
diff --git a/jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js b/jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js
index 951ffdf2ecb..2c078261513 100644
--- a/jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js
+++ b/jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js
@@ -1,5 +1,7 @@
/**
* Tests the streamable isMaster protocol against nodes with invalid replica set configs.
+ * SERVER-49428: Disable for ephemeralForTest, writeConcernMajorityJournalDefault is not off
+ * @tags: [incompatible_with_eft]
*/
(function() {
"use strict";
diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js
index c01e6ff51d7..c8da1271bb8 100644
--- a/jstests/replsets/initial_sync1.js
+++ b/jstests/replsets/initial_sync1.js
@@ -13,8 +13,11 @@
* 11. Everyone happy eventually
*
* This test assumes a 'newlyAdded' removal.
+ *
+ * SERVER-49428: Disable for ephemeralForTest, writeConcernMajorityJournalDefault is not off
* @tags: [
* requires_fcv_46,
+ * incompatible_with_eft,
* ]
*/
diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js
index 352cb874663..447c0b2d478 100644
--- a/jstests/replsets/remove1.js
+++ b/jstests/replsets/remove1.js
@@ -8,8 +8,11 @@
* Make sure both nodes are either primary or secondary
*
* This test assumes 'newlyAdded' fields are enabled, so blacklist from multiversion tests in 4.6.
+ *
+ * SERVER-49428: Disable for ephemeralForTest, writeConcernMajorityJournalDefault is not off
* @tags: [
* requires_fcv_46,
+ * incompatible_with_eft,
* ]
*/
diff --git a/jstests/slow1/conc_update.js b/jstests/slow1/conc_update.js
index 34398d91f3f..64332cee57f 100644
--- a/jstests/slow1/conc_update.js
+++ b/jstests/slow1/conc_update.js
@@ -1,3 +1,6 @@
+// SERVER-36709: Disabled for ephemeralForTest due to excessive memory usage
+// @tags: [incompatible_with_eft]
+
(function() {
"use strict";
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index eb2db8e7cc0..54e0f1239c7 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -486,9 +486,8 @@ env.Library(
'db/stats/serveronly_stats',
'db/stats/top',
'db/storage/backup_cursor_hooks',
- 'db/storage/biggie/storage_biggie',
- 'db/storage/devnull/storage_devnull',
'db/storage/ephemeral_for_test/storage_ephemeral_for_test',
+ 'db/storage/devnull/storage_devnull',
'db/storage/flow_control_parameters',
'db/storage/flow_control',
'db/storage/storage_control',
diff --git a/src/mongo/db/auth/auth_op_observer_test.cpp b/src/mongo/db/auth/auth_op_observer_test.cpp
index da3c89d0541..8527a96af52 100644
--- a/src/mongo/db/auth/auth_op_observer_test.cpp
+++ b/src/mongo/db/auth/auth_op_observer_test.cpp
@@ -46,7 +46,6 @@
#include "mongo/db/repl/storage_interface_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/session_catalog_mongod.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/unittest/death_test.h"
#include "mongo/util/clock_source_mock.h"
diff --git a/src/mongo/db/catalog/capped_utils_test.cpp b/src/mongo/db/catalog/capped_utils_test.cpp
index 986552622c8..b3cef4c8279 100644
--- a/src/mongo/db/catalog/capped_utils_test.cpp
+++ b/src/mongo/db/catalog/capped_utils_test.cpp
@@ -66,6 +66,8 @@ void CappedUtilsTest::setUp() {
ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY));
repl::ReplicationCoordinator::set(service, std::move(replCoord));
+ repl::setOplogCollectionName(service);
+
_storage = std::make_unique<repl::StorageInterfaceImpl>();
}
@@ -80,7 +82,9 @@ void CappedUtilsTest::tearDown() {
* Creates an OperationContext.
*/
ServiceContext::UniqueOperationContext makeOpCtx() {
- return cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
+ repl::createOplog(opCtx.get());
+ return opCtx;
}
/**
diff --git a/src/mongo/db/catalog/collection_validation_test.cpp b/src/mongo/db/catalog/collection_validation_test.cpp
index a9ac30cb35d..90e548bb9eb 100644
--- a/src/mongo/db/catalog/collection_validation_test.cpp
+++ b/src/mongo/db/catalog/collection_validation_test.cpp
@@ -267,16 +267,6 @@ TEST_F(CollectionValidationTest, ValidateEnforceFastCount) {
/*numErrors*/ 0,
{CollectionValidation::ValidateMode::kForegroundFullEnforceFastCount});
}
-TEST_F(CollectionValidationTest, ValidateEnforceFastCountError) {
- FailPointEnableBlock failPoint("ephemeralForTestReturnIncorrectNumRecords");
- auto opCtx = operationContext();
- foregroundValidate(opCtx,
- /*valid*/ false,
- /*numRecords*/ insertDataRange(opCtx, 0, 5),
- /*numInvalidDocuments*/ 0,
- /*numErrors*/ 1,
- {CollectionValidation::ValidateMode::kForegroundFullEnforceFastCount});
-}
/**
* Waits for a parallel running collection validation operation to start and then hang at a
diff --git a/src/mongo/db/catalog/create_collection_test.cpp b/src/mongo/db/catalog/create_collection_test.cpp
index 2c4dd9203fa..ea9bc5c02a5 100644
--- a/src/mongo/db/catalog/create_collection_test.cpp
+++ b/src/mongo/db/catalog/create_collection_test.cpp
@@ -71,6 +71,7 @@ void CreateCollectionTest::setUp() {
auto replCoord = std::make_unique<repl::ReplicationCoordinatorMock>(service);
ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY));
repl::ReplicationCoordinator::set(service, std::move(replCoord));
+ repl::setOplogCollectionName(service);
_storage = std::make_unique<repl::StorageInterfaceImpl>();
}
@@ -86,7 +87,9 @@ void CreateCollectionTest::tearDown() {
* Creates an OperationContext.
*/
ServiceContext::UniqueOperationContext makeOpCtx() {
- return cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
+ repl::createOplog(opCtx.get());
+ return opCtx;
}
void CreateCollectionTest::validateValidator(const std::string& validatorStr,
@@ -105,6 +108,7 @@ void CreateCollectionTest::validateValidator(const std::string& validatorStr,
ASSERT_TRUE(db) << "Cannot create collection " << newNss << " because database " << newNss.db()
<< " does not exist.";
+ WriteUnitOfWork wuow(opCtx.get());
const auto status =
db->userCreateNS(opCtx.get(), newNss, options, false /*createDefaultIndexes*/);
ASSERT_EQ(expectedError, status.code());
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index 5ecb49e607f..3acd605af57 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -216,7 +216,6 @@ void OpObserverMock::onInserts(OperationContext* opCtx,
uasserted(ErrorCodes::OperationFailed, "insert failed");
}
- ASSERT_TRUE(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
onInsertsIsTargetDatabaseExclusivelyLocked =
opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X);
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index 1a9b1271ca3..2199c17daa9 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -414,6 +414,8 @@ void MapReduceCommandTest::setUp() {
// Transition to PRIMARY so that the server can accept writes.
ASSERT_OK(_getReplCoord()->setFollowerMode(repl::MemberState::RS_PRIMARY));
+ repl::setOplogCollectionName(service);
+ repl::createOplog(_opCtx.get());
// Create collection with one document.
CollectionOptions collectionOptions;
@@ -528,7 +530,7 @@ TEST_F(MapReduceCommandTest, ReplacingExistingOutputCollectionPreservesIndexes)
auto indexSpec = BSON("v" << 2 << "key" << BSON("a" << 1) << "name"
<< "a_1");
{
- AutoGetCollection autoColl(_opCtx.get(), outputNss, MODE_IX);
+ AutoGetCollection autoColl(_opCtx.get(), outputNss, MODE_X);
auto coll = autoColl.getCollection();
ASSERT(coll);
auto indexCatalog = coll->getIndexCatalog();
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 76d8e358400..6154cb14b49 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -1217,10 +1217,7 @@ TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IS) {
ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_IX));
-
- // TODO: This is TRUE because Lock::CollectionLock converts IS lock to S
- ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
-
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_S));
ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_X));
}
@@ -1246,12 +1243,10 @@ TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IX) {
{
Lock::CollectionLock collLock(opCtx.get(), ns, MODE_IX);
- // TODO: This is TRUE because Lock::CollectionLock converts IX lock to X
ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
-
ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
- ASSERT(lockState->isCollectionLockedForMode(ns, MODE_X));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_X));
}
{
diff --git a/src/mongo/db/free_mon/free_mon_controller_test.cpp b/src/mongo/db/free_mon/free_mon_controller_test.cpp
index 331770e7275..b594d09f6b2 100644
--- a/src/mongo/db/free_mon/free_mon_controller_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller_test.cpp
@@ -429,6 +429,9 @@ void FreeMonControllerTest::setUp() {
// Transition to PRIMARY so that the server can accept writes.
ASSERT_OK(_getReplCoord()->setFollowerMode(repl::MemberState::RS_PRIMARY));
+ repl::setOplogCollectionName(service);
+ repl::createOplog(_opCtx.get());
+
// Create collection with one document.
CollectionOptions collectionOptions;
collectionOptions.uuid = UUID::gen();
diff --git a/src/mongo/db/free_mon/free_mon_storage_test.cpp b/src/mongo/db/free_mon/free_mon_storage_test.cpp
index dc813d35e4b..fee8f984d62 100644
--- a/src/mongo/db/free_mon/free_mon_storage_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_storage_test.cpp
@@ -86,6 +86,9 @@ void FreeMonStorageTest::setUp() {
// Transition to PRIMARY so that the server can accept writes.
ASSERT_OK(_getReplCoord()->setFollowerMode(repl::MemberState::RS_PRIMARY));
+
+ repl::setOplogCollectionName(service);
+ repl::createOplog(_opCtx.get());
}
void FreeMonStorageTest::tearDown() {
diff --git a/src/mongo/db/op_observer_impl_test.cpp b/src/mongo/db/op_observer_impl_test.cpp
index 16c59b16ef3..856f650f6c3 100644
--- a/src/mongo/db/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer_impl_test.cpp
@@ -48,7 +48,6 @@
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/session_catalog_mongod.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/db/transaction_participant_gen.h"
#include "mongo/rpc/get_status_from_command_result.h"
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index c9aeb038459..c80520efe5f 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -1376,6 +1376,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/service_context_d_test_fixture',
'$BUILD_DIR/mongo/db/service_context_test_fixture',
'$BUILD_DIR/mongo/db/stats/counters',
+ '$BUILD_DIR/mongo/db/storage/ephemeral_for_test/storage_ephemeral_for_test_core',
'$BUILD_DIR/mongo/db/transaction',
'$BUILD_DIR/mongo/dbtests/mocklib',
'$BUILD_DIR/mongo/executor/network_interface_factory',
diff --git a/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp b/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp
index dec18f8e014..09d79862c07 100644
--- a/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp
+++ b/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp
@@ -98,11 +98,13 @@ void MockReplCoordServerFixture::insertOplogEntry(const repl::OplogEntry& entry)
auto coll = autoColl.getCollection();
ASSERT_TRUE(coll != nullptr);
+ WriteUnitOfWork wuow(opCtx());
auto status = coll->insertDocument(opCtx(),
InsertStatement(entry.toBSON()),
&CurOp::get(opCtx())->debug(),
/* fromMigrate */ false);
ASSERT_OK(status);
+ wuow.commit();
}
OperationContext* MockReplCoordServerFixture::opCtx() {
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp b/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp
index 8767fec7057..a9cc8a6a19c 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/storage/recovery_unit_noop.h"
+#include "mongo/db/storage/storage_engine_impl.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/str.h"
@@ -229,18 +230,19 @@ TEST_F(ReplicationConsistencyMarkersTest, ReplicationConsistencyMarkers) {
// Check oplog truncate after point document.
ASSERT_EQUALS(endOpTime.getTimestamp(), consistencyMarkers.getOplogTruncateAfterPoint(opCtx));
- // Recovery unit will be owned by "opCtx".
- RecoveryUnitWithDurabilityTracking* recoveryUnit = new RecoveryUnitWithDurabilityTracking();
- opCtx->setRecoveryUnit(std::unique_ptr<RecoveryUnit>(recoveryUnit),
- WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
-
- // Set min valid without waiting for the changes to be durable.
- OpTime endOpTime2({Seconds(789), 0}, 1LL);
- consistencyMarkers.setMinValid(opCtx, endOpTime2);
- consistencyMarkers.clearAppliedThrough(opCtx, {});
- ASSERT_EQUALS(consistencyMarkers.getAppliedThrough(opCtx), OpTime());
- ASSERT_EQUALS(consistencyMarkers.getMinValid(opCtx), endOpTime2);
- ASSERT_FALSE(recoveryUnit->waitUntilDurableCalled);
+ // SERVER-49685: We can't use a RecoveryUnitNoop with ephemeralForTest
+ //// Recovery unit will be owned by "opCtx".
+ // RecoveryUnitWithDurabilityTracking* recoveryUnit = new RecoveryUnitWithDurabilityTracking();
+ // opCtx->setRecoveryUnit(std::unique_ptr<RecoveryUnit>(recoveryUnit),
+ // WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
+
+ //// Set min valid without waiting for the changes to be durable.
+ // OpTime endOpTime2({Seconds(789), 0}, 1LL);
+ // consistencyMarkers.setMinValid(opCtx, endOpTime2);
+ // consistencyMarkers.clearAppliedThrough(opCtx, {});
+ // ASSERT_EQUALS(consistencyMarkers.getAppliedThrough(opCtx), OpTime());
+ // ASSERT_EQUALS(consistencyMarkers.getMinValid(opCtx), endOpTime2);
+ // ASSERT_FALSE(recoveryUnit->waitUntilDurableCalled);
}
TEST_F(ReplicationConsistencyMarkersTest, InitialSyncId) {
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index 174124c82ae..869f422f39f 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -187,6 +187,9 @@ private:
ASSERT_OK(
ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY));
+ repl::setOplogCollectionName(service);
+ repl::createOplog(_opCtx.get());
+
ASSERT_OK(_storageInterface->createCollection(
getOperationContext(), testNs, generateOptionsWithUuid()));
@@ -327,11 +330,6 @@ CollectionOptions _createOplogCollectionOptions() {
* Creates an oplog with insert entries at the given timestamps.
*/
void _setUpOplog(OperationContext* opCtx, StorageInterface* storage, std::vector<int> timestamps) {
- ASSERT_OK(storage->createCollection(opCtx, oplogNs, _createOplogCollectionOptions()));
-
- // Initialize the cached pointer to the oplog collection.
- acquireOplogCollectionForLogging(opCtx);
-
for (int ts : timestamps) {
ASSERT_OK(storage->insertDocument(
opCtx, oplogNs, _makeInsertOplogEntry(ts), OpTime::kUninitializedTerm));
@@ -446,21 +444,6 @@ TEST_F(ReplicationRecoveryTest, RecoveryWithEmptyOplogSucceedsWithStableTimestam
_assertDocsInTestCollection(opCtx, {});
}
-DEATH_TEST_REGEX_F(ReplicationRecoveryTest,
- TruncateFassertsWithoutOplogCollection,
- "Fatal assertion.*34418.*NamespaceNotFound: Can't find local.oplog.rs") {
- ReplicationRecoveryImpl recovery(getStorageInterface(), getConsistencyMarkers());
- auto opCtx = getOperationContext();
-
- getConsistencyMarkers()->setOplogTruncateAfterPoint(opCtx, Timestamp(4, 4));
- getConsistencyMarkers()->setAppliedThrough(opCtx, OpTime(Timestamp(3, 3), 1));
-
- // Create the database.
- ASSERT_OK(getStorageInterface()->createCollection(
- opCtx, NamespaceString("local.other"), generateOptionsWithUuid()));
-
- recovery.recoverFromOplog(opCtx, boost::none);
-}
DEATH_TEST_F(ReplicationRecoveryTest,
RecoveryInvariantsIfStableTimestampAndDoesNotSupportRecoveryTimestamp,
diff --git a/src/mongo/db/s/session_catalog_migration_destination_test.cpp b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
index 68119925dfb..ed74a769b6b 100644
--- a/src/mongo/db/s/session_catalog_migration_destination_test.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
@@ -1730,7 +1730,9 @@ TEST_F(SessionCatalogMigrationDestinationTest, MigratingKnownStmtWhileOplogTrunc
{
AutoGetCollection oplogColl(opCtx, NamespaceString::kRsOplogNamespace, MODE_X);
+ WriteUnitOfWork wuow(opCtx);
ASSERT_OK(oplogColl.getCollection()->truncate(opCtx)); // Empties the oplog collection.
+ wuow.commit();
}
{
diff --git a/src/mongo/db/session_catalog_mongod_test.cpp b/src/mongo/db/session_catalog_mongod_test.cpp
index 61a97fb7b7e..12adac67cef 100644
--- a/src/mongo/db/session_catalog_mongod_test.cpp
+++ b/src/mongo/db/session_catalog_mongod_test.cpp
@@ -49,6 +49,9 @@ protected:
ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY));
repl::ReplicationCoordinator::set(service, std::move(replCoord));
+ repl::setOplogCollectionName(service);
+ repl::createOplog(_opCtx);
+
service->setFastClockSource(std::make_unique<ClockSourceMock>());
}
diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript
index fe02bdadf11..a7e1e41b9f2 100644
--- a/src/mongo/db/storage/SConscript
+++ b/src/mongo/db/storage/SConscript
@@ -5,7 +5,6 @@ env = env.Clone()
env.SConscript(
dirs=[
- 'biggie',
'devnull',
'ephemeral_for_test',
'kv',
@@ -453,11 +452,8 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/service_context_test_fixture',
'$BUILD_DIR/mongo/db/storage/devnull/storage_devnull_core',
'$BUILD_DIR/mongo/db/storage/durable_catalog_impl',
- '$BUILD_DIR/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store',
- '$BUILD_DIR/mongo/db/storage/ephemeral_for_test/storage_ephemeral_for_test_core',
'$BUILD_DIR/mongo/db/storage/storage_engine_impl',
'$BUILD_DIR/mongo/db/storage/storage_repair_observer',
- '$BUILD_DIR/mongo/db/storage/ephemeral_for_test/storage_ephemeral_for_test_core',
'$BUILD_DIR/mongo/executor/network_interface_factory',
'$BUILD_DIR/mongo/executor/network_interface_mock',
'flow_control',
diff --git a/src/mongo/db/storage/biggie/SConscript b/src/mongo/db/storage/biggie/SConscript
deleted file mode 100644
index d15cba68888..00000000000
--- a/src/mongo/db/storage/biggie/SConscript
+++ /dev/null
@@ -1,72 +0,0 @@
-# -*- mode: python; -*-
-
-Import("env")
-
-env = env.Clone()
-
-env.Library(
- target='storage_biggie_core',
- source=[
- 'biggie_kv_engine.cpp',
- 'biggie_record_store.cpp',
- 'biggie_recovery_unit.cpp',
- 'biggie_sorted_impl.cpp',
- 'biggie_visibility_manager.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/base',
- '$BUILD_DIR/mongo/db/concurrency/write_conflict_exception',
- '$BUILD_DIR/mongo/db/storage/index_entry_comparison',
- '$BUILD_DIR/mongo/db/storage/kv/kv_prefix',
- '$BUILD_DIR/mongo/db/storage/recovery_unit_base',
- ],
- LIBDEPS_PRIVATE=[
- '$BUILD_DIR/mongo/db/storage/key_string',
- '$BUILD_DIR/mongo/db/storage/oplog_hack',
- '$BUILD_DIR/mongo/db/storage/write_unit_of_work',
- '$BUILD_DIR/mongo/db/commands/server_status',
- ],
-)
-
-env.Library(
- target='storage_biggie',
- source=[
- 'biggie_init.cpp',
- 'biggie_server_status.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/db/storage/durable_catalog_impl',
- '$BUILD_DIR/mongo/db/storage/storage_engine_impl',
- 'storage_biggie_core',
- ],
- LIBDEPS_PRIVATE=[
- '$BUILD_DIR/mongo/db/storage/storage_engine_common',
- '$BUILD_DIR/mongo/db/commands/server_status',
- ],
-)
-
-# Testing
-env.CppUnitTest(
- target='storage_biggie_test',
- source=[
- 'biggie_kv_engine_test.cpp',
- 'biggie_record_store_test.cpp',
- 'biggie_recovery_unit_test.cpp',
- 'biggie_sorted_impl_test.cpp',
- 'store_test.cpp',
- 'radix_store_concurrent_test.cpp',
- ],
- LIBDEPS=[
- 'storage_biggie_core',
- '$BUILD_DIR/mongo/db/auth/authmocks',
- '$BUILD_DIR/mongo/db/common',
- '$BUILD_DIR/mongo/db/index/index_descriptor',
- '$BUILD_DIR/mongo/db/repl/replmocks',
- '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
- '$BUILD_DIR/mongo/db/storage/key_string',
- '$BUILD_DIR/mongo/db/storage/kv/kv_engine_test_harness',
- '$BUILD_DIR/mongo/db/storage/record_store_test_harness',
- '$BUILD_DIR/mongo/db/storage/recovery_unit_test_harness',
- '$BUILD_DIR/mongo/db/storage/sorted_data_interface_test_harness',
- ],
-)
diff --git a/src/mongo/db/storage/biggie/biggie_init.cpp b/src/mongo/db/storage/biggie/biggie_init.cpp
deleted file mode 100644
index 148ec547ed8..00000000000
--- a/src/mongo/db/storage/biggie/biggie_init.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/base/init.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/storage/biggie/biggie_kv_engine.h"
-#include "mongo/db/storage/biggie/biggie_record_store.h"
-#include "mongo/db/storage/biggie/biggie_server_status.h"
-#include "mongo/db/storage/storage_engine_impl.h"
-#include "mongo/db/storage/storage_engine_init.h"
-#include "mongo/db/storage/storage_options.h"
-
-#if __has_feature(address_sanitizer)
-#include <sanitizer/lsan_interface.h>
-#endif
-
-namespace mongo {
-namespace biggie {
-
-namespace {
-class BiggieStorageEngineFactory : public StorageEngine::Factory {
-public:
- virtual std::unique_ptr<StorageEngine> create(const StorageGlobalParams& params,
- const StorageEngineLockFile* lockFile) const {
- auto kv = std::make_unique<KVEngine>();
- // We must only add the server parameters to the global registry once during unit testing.
- static int setupCountForUnitTests = 0;
- if (setupCountForUnitTests == 0) {
- ++setupCountForUnitTests;
-
- // Intentionally leaked.
- MONGO_COMPILER_VARIABLE_UNUSED auto leakedSection =
- new BiggieServerStatusSection(kv.get());
-
- // This allows unit tests to run this code without encountering memory leaks
-#if __has_feature(address_sanitizer)
- __lsan_ignore_object(leakedSection);
-#endif
- }
-
- StorageEngineOptions options;
- options.directoryPerDB = params.directoryperdb;
- options.forRepair = params.repair;
- return std::make_unique<StorageEngineImpl>(std::move(kv), options);
- }
-
- virtual StringData getCanonicalName() const {
- return kBiggieEngineName;
- }
-
- virtual Status validateMetadata(const StorageEngineMetadata& metadata,
- const StorageGlobalParams& params) const {
- return Status::OK();
- }
-
- virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
- return BSONObj();
- }
-};
-
-
-ServiceContext::ConstructorActionRegisterer registerBiggie(
- "RegisterBiggieEngine", [](ServiceContext* service) {
- registerStorageEngine(service, std::make_unique<BiggieStorageEngineFactory>());
- });
-
-} // namespace
-} // namespace biggie
-} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.cpp b/src/mongo/db/storage/biggie/biggie_record_store.cpp
deleted file mode 100644
index 2322e1285f6..00000000000
--- a/src/mongo/db/storage/biggie/biggie_record_store.cpp
+++ /dev/null
@@ -1,617 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/biggie/biggie_record_store.h"
-
-#include <cstring>
-#include <memory>
-#include <utility>
-
-#include "mongo/bson/bsonobj.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
-#include "mongo/db/storage/biggie/biggie_visibility_manager.h"
-#include "mongo/db/storage/biggie/store.h"
-#include "mongo/db/storage/key_string.h"
-#include "mongo/db/storage/oplog_hack.h"
-#include "mongo/db/storage/write_unit_of_work.h"
-#include "mongo/util/hex.h"
-
-namespace mongo {
-namespace biggie {
-namespace {
-Ordering allAscending = Ordering::make(BSONObj());
-auto const version = KeyString::Version::V1;
-BSONObj const sample = BSON(""
- << "s"
- << "" << (int64_t)0);
-
-std::string createKey(StringData ident, int64_t recordId) {
- KeyString::Builder ks(version, BSON("" << ident << "" << recordId), allAscending);
- return std::string(ks.getBuffer(), ks.getSize());
-}
-
-RecordId extractRecordId(const std::string& keyStr) {
- KeyString::Builder ks(version, sample, allAscending);
- ks.resetFromBuffer(keyStr.c_str(), keyStr.size());
- BSONObj obj = KeyString::toBson(keyStr.c_str(), keyStr.size(), allAscending, ks.getTypeBits());
- auto it = BSONObjIterator(obj);
- ++it;
- return RecordId((*it).Long());
-}
-} // namespace
-
-
-const std::string kBiggieEngineName = "biggie";
-
-RecordStore::RecordStore(StringData ns,
- StringData ident,
- bool isCapped,
- int64_t cappedMaxSize,
- int64_t cappedMaxDocs,
- CappedCallback* cappedCallback,
- VisibilityManager* visibilityManager)
- : mongo::RecordStore(ns),
- _isCapped(isCapped),
- _cappedMaxSize(cappedMaxSize),
- _cappedMaxDocs(cappedMaxDocs),
- _identStr(ident.rawData(), ident.size()),
- _ident(_identStr.data(), _identStr.size()),
- _prefix(createKey(_ident, std::numeric_limits<int64_t>::min())),
- _postfix(createKey(_ident, std::numeric_limits<int64_t>::max())),
- _cappedCallback(cappedCallback),
- _isOplog(NamespaceString::oplog(ns)),
- _visibilityManager(visibilityManager) {
- if (_isCapped) {
- invariant(_cappedMaxSize > 0);
- invariant(_cappedMaxDocs == -1 || _cappedMaxDocs > 0);
- } else {
- invariant(_cappedMaxSize == -1);
- invariant(_cappedMaxDocs == -1);
- }
-}
-
-const char* RecordStore::name() const {
- return "biggie";
-}
-
-const std::string& RecordStore::getIdent() const {
- return _identStr;
-}
-
-long long RecordStore::dataSize(OperationContext* opCtx) const {
- return _dataSize.load();
-}
-
-long long RecordStore::numRecords(OperationContext* opCtx) const {
- return static_cast<long long>(_numRecords.load());
-}
-
-bool RecordStore::isCapped() const {
- return _isCapped;
-}
-
-void RecordStore::setCappedCallback(CappedCallback* cb) {
- stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
- _cappedCallback = cb;
-}
-
-int64_t RecordStore::storageSize(OperationContext* opCtx,
- BSONObjBuilder* extraInfo,
- int infoLevel) const {
- return dataSize(opCtx);
-}
-
-bool RecordStore::findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const {
- StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- auto it = workingCopy->find(createKey(_ident, loc.repr()));
- if (it == workingCopy->end()) {
- return false;
- }
- *rd = RecordData(it->second.c_str(), it->second.length());
- return true;
-}
-
-void RecordStore::deleteRecord(OperationContext* opCtx, const RecordId& dl) {
- _initHighestIdIfNeeded(opCtx);
- auto ru = RecoveryUnit::get(opCtx);
- StringStore* workingCopy(ru->getHead());
- SizeAdjuster adjuster(opCtx, this);
- invariant(workingCopy->erase(createKey(_ident, dl.repr())));
- ru->makeDirty();
-}
-
-Status RecordStore::insertRecords(OperationContext* opCtx,
- std::vector<Record>* inOutRecords,
- const std::vector<Timestamp>& timestamps) {
- int64_t totalSize = 0;
- for (auto& record : *inOutRecords)
- totalSize += record.data.size();
-
- // Caller will retry one element at a time.
- if (_isCapped && totalSize > _cappedMaxSize)
- return Status(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
-
- auto ru = RecoveryUnit::get(opCtx);
- StringStore* workingCopy(ru->getHead());
- {
- SizeAdjuster adjuster(opCtx, this);
- for (auto& record : *inOutRecords) {
- int64_t thisRecordId = 0;
- if (_isOplog) {
- StatusWith<RecordId> status =
- oploghack::extractKey(record.data.data(), record.data.size());
- if (!status.isOK())
- return status.getStatus();
- thisRecordId = status.getValue().repr();
- _visibilityManager->addUncommittedRecord(opCtx, this, RecordId(thisRecordId));
- } else {
- thisRecordId = _nextRecordId(opCtx);
- }
- workingCopy->insert(
- StringStore::value_type{createKey(_ident, thisRecordId),
- std::string(record.data.data(), record.data.size())});
- record.id = RecordId(thisRecordId);
- }
- }
- ru->makeDirty();
- _cappedDeleteAsNeeded(opCtx, workingCopy);
- return Status::OK();
-}
-
-Status RecordStore::updateRecord(OperationContext* opCtx,
- const RecordId& oldLocation,
- const char* data,
- int len) {
- StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- SizeAdjuster adjuster(opCtx, this);
- {
- std::string key = createKey(_ident, oldLocation.repr());
- StringStore::const_iterator it = workingCopy->find(key);
- invariant(it != workingCopy->end());
- workingCopy->update(StringStore::value_type{key, std::string(data, len)});
- }
- _cappedDeleteAsNeeded(opCtx, workingCopy);
- RecoveryUnit::get(opCtx)->makeDirty();
-
- return Status::OK();
-}
-
-bool RecordStore::updateWithDamagesSupported() const {
- // TODO: enable updateWithDamages after writable pointers are complete.
- return false;
-}
-
-StatusWith<RecordData> RecordStore::updateWithDamages(OperationContext* opCtx,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages) {
- return RecordData();
-}
-
-std::unique_ptr<SeekableRecordCursor> RecordStore::getCursor(OperationContext* opCtx,
- bool forward) const {
- if (forward)
- return std::make_unique<Cursor>(opCtx, *this, _visibilityManager);
- return std::make_unique<ReverseCursor>(opCtx, *this, _visibilityManager);
-}
-
-Status RecordStore::truncate(OperationContext* opCtx) {
- SizeAdjuster adjuster(opCtx, this);
- StatusWith<int64_t> s =
- truncateWithoutUpdatingCount(checked_cast<biggie::RecoveryUnit*>(opCtx->recoveryUnit()));
- if (!s.isOK())
- return s.getStatus();
-
- return Status::OK();
-}
-
-StatusWith<int64_t> RecordStore::truncateWithoutUpdatingCount(mongo::RecoveryUnit* ru) {
- auto bRu = checked_cast<biggie::RecoveryUnit*>(ru);
- StringStore* workingCopy(bRu->getHead());
- StringStore::const_iterator end = workingCopy->upper_bound(_postfix);
- std::vector<std::string> toDelete;
-
- for (auto it = workingCopy->lower_bound(_prefix); it != end; ++it) {
- toDelete.push_back(it->first);
- }
-
- if (toDelete.empty())
- return 0;
-
- for (const auto& key : toDelete)
- workingCopy->erase(key);
-
- bRu->makeDirty();
-
- return static_cast<int64_t>(toDelete.size());
-}
-
-void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
- auto ru = RecoveryUnit::get(opCtx);
- StringStore* workingCopy(ru->getHead());
- WriteUnitOfWork wuow(opCtx);
- const auto recordKey = createKey(_ident, end.repr());
- auto recordIt =
- inclusive ? workingCopy->lower_bound(recordKey) : workingCopy->upper_bound(recordKey);
- auto endIt = workingCopy->upper_bound(_postfix);
-
- while (recordIt != endIt) {
- stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
- if (_cappedCallback) {
- // Documents are guaranteed to have a RecordId at the end of the KeyString, unlike
- // unique indexes.
- RecordId rid = extractRecordId(recordIt->first);
- RecordData rd = RecordData(recordIt->second.c_str(), recordIt->second.length());
- uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, rid, rd));
- }
- // Important to scope adjuster until after capped callback, as that changes indexes and
- // would result in those changes being reflected in RecordStore count/size.
- SizeAdjuster adjuster(opCtx, this);
-
- // Don't need to increment the iterator because the iterator gets revalidated and placed on
- // the next item after the erase.
- workingCopy->erase(recordIt->first);
-
- // Tree modifications are bound to happen here so we need to reposition our end cursor.
- endIt.repositionIfChanged();
- ru->makeDirty();
- }
-
- wuow.commit();
-}
-
-void RecordStore::appendCustomStats(OperationContext* opCtx,
- BSONObjBuilder* result,
- double scale) const {
- result->appendBool("capped", _isCapped);
- if (_isCapped) {
- result->appendIntOrLL("max", _cappedMaxDocs);
- result->appendIntOrLL("maxSize", _cappedMaxSize / scale);
- }
-}
-
-void RecordStore::updateStatsAfterRepair(OperationContext* opCtx,
- long long numRecords,
- long long dataSize) {
- // SERVER-38883 This storage engine should instead be able to invariant that stats are correct.
- _numRecords.store(numRecords);
- _dataSize.store(dataSize);
-}
-
-void RecordStore::waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const {
- _visibilityManager->waitForAllEarlierOplogWritesToBeVisible(opCtx);
-}
-
-boost::optional<RecordId> RecordStore::oplogStartHack(OperationContext* opCtx,
- const RecordId& startingPosition) const {
- if (!_isOplog)
- return boost::none;
-
- if (numRecords(opCtx) == 0)
- return RecordId();
-
- StringStore* workingCopy{RecoveryUnit::get(opCtx)->getHead()};
-
- std::string key = createKey(_ident, startingPosition.repr());
- StringStore::const_reverse_iterator it(workingCopy->upper_bound(key));
-
- if (it == workingCopy->rend())
- return RecordId();
-
- RecordId rid = RecordId(extractRecordId(it->first));
- if (rid > startingPosition)
- return RecordId();
-
- return rid;
-}
-
-Status RecordStore::oplogDiskLocRegister(OperationContext* opCtx,
- const Timestamp& opTime,
- bool orderedCommit) {
- if (!orderedCommit) {
- return opCtx->recoveryUnit()->setTimestamp(opTime);
- }
-
- return Status::OK();
-}
-
-void RecordStore::_initHighestIdIfNeeded(OperationContext* opCtx) {
- // In the normal case, this will already be initialized, so use a weak load. Since this value
- // will only change from 0 to a positive integer, the only risk is reading an outdated value, 0,
- // and having to take the mutex.
- if (_highestRecordId.loadRelaxed() > 0) {
- return;
- }
-
- // Only one thread needs to do this.
- stdx::lock_guard<Latch> lk(_initHighestIdMutex);
- if (_highestRecordId.load() > 0) {
- return;
- }
-
- // Need to start at 1 so we are always higher than RecordId::min()
- int64_t nextId = 1;
-
- // Find the largest RecordId currently in use.
- std::unique_ptr<SeekableRecordCursor> cursor = getCursor(opCtx, /*forward=*/false);
- if (auto record = cursor->next()) {
- nextId = record->id.repr() + 1;
- }
-
- _highestRecordId.store(nextId);
-};
-
-int64_t RecordStore::_nextRecordId(OperationContext* opCtx) {
- _initHighestIdIfNeeded(opCtx);
- return _highestRecordId.fetchAndAdd(1);
-}
-
-bool RecordStore::_cappedAndNeedDelete(OperationContext* opCtx, StringStore* workingCopy) {
- if (!_isCapped)
- return false;
-
- if (dataSize(opCtx) > _cappedMaxSize)
- return true;
-
- if ((_cappedMaxDocs != -1) && numRecords(opCtx) > _cappedMaxDocs)
- return true;
- return false;
-}
-
-void RecordStore::_cappedDeleteAsNeeded(OperationContext* opCtx, StringStore* workingCopy) {
- if (!_isCapped)
- return;
-
- // Create the lowest key for this identifier and use lower_bound() to get to the first one.
- auto recordIt = workingCopy->lower_bound(_prefix);
-
- // Ensure only one thread at a time can do deletes, otherwise they'll conflict.
- stdx::lock_guard<Latch> cappedDeleterLock(_cappedDeleterMutex);
-
- while (_cappedAndNeedDelete(opCtx, workingCopy)) {
-
- stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
- RecordId rid = RecordId(extractRecordId(recordIt->first));
-
- if (_isOplog && _visibilityManager->isFirstHidden(rid)) {
- // We have a record that hasn't been committed yet, so we shouldn't truncate anymore
- // until it gets committed.
- return;
- }
-
- if (_cappedCallback) {
- RecordData rd = RecordData(recordIt->second.c_str(), recordIt->second.length());
- uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, rid, rd));
- }
-
- SizeAdjuster adjuster(opCtx, this);
- invariant(numRecords(opCtx) > 0, str::stream() << numRecords(opCtx));
-
- // Don't need to increment the iterator because the iterator gets revalidated and placed on
- // the next item after the erase.
- workingCopy->erase(recordIt->first);
- auto ru = RecoveryUnit::get(opCtx);
- ru->makeDirty();
- }
-}
-
-RecordStore::Cursor::Cursor(OperationContext* opCtx,
- const RecordStore& rs,
- VisibilityManager* visibilityManager)
- : opCtx(opCtx), _rs(rs), _visibilityManager(visibilityManager) {
- if (_rs._isOplog) {
- _oplogVisibility = _visibilityManager->getAllCommittedRecord();
- }
-}
-
-boost::optional<Record> RecordStore::Cursor::next() {
- _savedPosition = boost::none;
- StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- if (_needFirstSeek) {
- _needFirstSeek = false;
- it = workingCopy->lower_bound(_rs._prefix);
- } else if (it != workingCopy->end() && !_lastMoveWasRestore) {
- ++it;
- }
- _lastMoveWasRestore = false;
- if (it != workingCopy->end() && inPrefix(it->first)) {
- _savedPosition = it->first;
- Record nextRecord;
- nextRecord.id = RecordId(extractRecordId(it->first));
- nextRecord.data = RecordData(it->second.c_str(), it->second.length());
-
- if (_rs._isOplog && nextRecord.id > _oplogVisibility) {
- return boost::none;
- }
-
- return nextRecord;
- }
- return boost::none;
-}
-
-boost::optional<Record> RecordStore::Cursor::seekExact(const RecordId& id) {
- _savedPosition = boost::none;
- _lastMoveWasRestore = false;
- StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- std::string key = createKey(_rs._ident, id.repr());
- it = workingCopy->find(key);
-
- if (it == workingCopy->end() || !inPrefix(it->first))
- return boost::none;
-
- if (_rs._isOplog && id > _oplogVisibility) {
- return boost::none;
- }
-
- _needFirstSeek = false;
- _savedPosition = it->first;
- return Record{id, RecordData(it->second.c_str(), it->second.length())};
-}
-
-// Positions are saved as we go.
-void RecordStore::Cursor::save() {}
-void RecordStore::Cursor::saveUnpositioned() {}
-
-bool RecordStore::Cursor::restore() {
- if (!_savedPosition)
- return true;
-
- // Get oplog visibility before forking working tree to guarantee that nothing gets committed
- // after we've forked that would update oplog visibility
- if (_rs._isOplog) {
- _oplogVisibility = _visibilityManager->getAllCommittedRecord();
- }
-
- StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- it = workingCopy->lower_bound(_savedPosition.value());
- _lastMoveWasRestore = it == workingCopy->end() || it->first != _savedPosition.value();
-
- // Capped iterators die on invalidation rather than advancing.
- return !(_rs._isCapped && _lastMoveWasRestore);
-}
-
-void RecordStore::Cursor::detachFromOperationContext() {
- invariant(opCtx != nullptr);
- opCtx = nullptr;
-}
-
-void RecordStore::Cursor::reattachToOperationContext(OperationContext* opCtx) {
- invariant(opCtx != nullptr);
- this->opCtx = opCtx;
-}
-
-bool RecordStore::Cursor::inPrefix(const std::string& key_string) {
- return (key_string > _rs._prefix) && (key_string < _rs._postfix);
-}
-
-RecordStore::ReverseCursor::ReverseCursor(OperationContext* opCtx,
- const RecordStore& rs,
- VisibilityManager* visibilityManager)
- : opCtx(opCtx), _rs(rs), _visibilityManager(visibilityManager) {
- _savedPosition = boost::none;
-}
-
-boost::optional<Record> RecordStore::ReverseCursor::next() {
- _savedPosition = boost::none;
- StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- if (_needFirstSeek) {
- _needFirstSeek = false;
- it = StringStore::const_reverse_iterator(workingCopy->upper_bound(_rs._postfix));
- } else if (it != workingCopy->rend() && !_lastMoveWasRestore) {
- ++it;
- }
- _lastMoveWasRestore = false;
-
- if (it != workingCopy->rend() && inPrefix(it->first)) {
- _savedPosition = it->first;
- Record nextRecord;
- nextRecord.id = RecordId(extractRecordId(it->first));
- nextRecord.data = RecordData(it->second.c_str(), it->second.length());
-
- return nextRecord;
- }
- return boost::none;
-}
-
-boost::optional<Record> RecordStore::ReverseCursor::seekExact(const RecordId& id) {
- _needFirstSeek = false;
- _savedPosition = boost::none;
- StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- std::string key = createKey(_rs._ident, id.repr());
- StringStore::const_iterator canFind = workingCopy->find(key);
- if (canFind == workingCopy->end() || !inPrefix(canFind->first)) {
- it = workingCopy->rend();
- return boost::none;
- }
-
- it = StringStore::const_reverse_iterator(++canFind); // reverse iterator returns item 1 before
- _savedPosition = it->first;
- return Record{id, RecordData(it->second.c_str(), it->second.length())};
-}
-
-void RecordStore::ReverseCursor::save() {}
-void RecordStore::ReverseCursor::saveUnpositioned() {}
-
-bool RecordStore::ReverseCursor::restore() {
- if (!_savedPosition)
- return true;
-
- StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- it = StringStore::const_reverse_iterator(workingCopy->upper_bound(_savedPosition.value()));
- _lastMoveWasRestore = (it == workingCopy->rend() || it->first != _savedPosition.value());
-
- // Capped iterators die on invalidation rather than advancing.
- return !(_rs._isCapped && _lastMoveWasRestore);
-}
-
-void RecordStore::ReverseCursor::detachFromOperationContext() {
- invariant(opCtx != nullptr);
- opCtx = nullptr;
-}
-
-void RecordStore::ReverseCursor::reattachToOperationContext(OperationContext* opCtx) {
- invariant(opCtx != nullptr);
- this->opCtx = opCtx;
-}
-
-bool RecordStore::ReverseCursor::inPrefix(const std::string& key_string) {
- return (key_string > _rs._prefix) && (key_string < _rs._postfix);
-}
-
-RecordStore::SizeAdjuster::SizeAdjuster(OperationContext* opCtx, RecordStore* rs)
- : _opCtx(opCtx),
- _rs(rs),
- _workingCopy(biggie::RecoveryUnit::get(opCtx)->getHead()),
- _origNumRecords(_workingCopy->size()),
- _origDataSize(_workingCopy->dataSize()) {}
-
-RecordStore::SizeAdjuster::~SizeAdjuster() {
- // SERVER-48981 This implementation of fastcount results in inaccurate values. This storage
- // engine emits write conflict exceptions at commit-time leading to the fastcount to be
- // inaccurate until the rollback happens.
- // If proper local isolation is implemented, SERVER-38883 can also be fulfulled for this storage
- // engine where we can invariant for correct fastcount in updateStatsAfterRepair()
- int64_t deltaNumRecords = _workingCopy->size() - _origNumRecords;
- int64_t deltaDataSize = _workingCopy->dataSize() - _origDataSize;
- _rs->_numRecords.fetchAndAdd(deltaNumRecords);
- _rs->_dataSize.fetchAndAdd(deltaDataSize);
- RecoveryUnit::get(_opCtx)->onRollback([rs = _rs, deltaNumRecords, deltaDataSize]() {
- rs->_numRecords.fetchAndSubtract(deltaNumRecords);
- rs->_dataSize.fetchAndSubtract(deltaDataSize);
- });
-}
-
-} // namespace biggie
-} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.h b/src/mongo/db/storage/biggie/biggie_record_store.h
deleted file mode 100644
index 1a3c862f9d4..00000000000
--- a/src/mongo/db/storage/biggie/biggie_record_store.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <atomic>
-#include <map>
-
-#include "mongo/db/concurrency/d_concurrency.h"
-#include "mongo/db/storage/biggie/biggie_visibility_manager.h"
-#include "mongo/db/storage/biggie/store.h"
-#include "mongo/db/storage/capped_callback.h"
-#include "mongo/db/storage/record_store.h"
-#include "mongo/platform/atomic_word.h"
-#include "mongo/platform/mutex.h"
-
-namespace mongo {
-namespace biggie {
-
-// TODO SERVER-38987: Change the name to EphemeralForTest.
-extern const std::string kBiggieEngineName;
-
-/**
- * A RecordStore that stores all data in-memory.
- */
-class RecordStore final : public ::mongo::RecordStore {
-public:
- explicit RecordStore(StringData ns,
- StringData ident,
- bool isCapped = false,
- int64_t cappedMaxSize = -1,
- int64_t cappedMaxDocs = -1,
- CappedCallback* cappedCallback = nullptr,
- VisibilityManager* visibilityManager = nullptr);
- ~RecordStore() = default;
-
- virtual const char* name() const;
- virtual const std::string& getIdent() const;
- virtual long long dataSize(OperationContext* opCtx) const;
- virtual long long numRecords(OperationContext* opCtx) const;
- virtual bool isCapped() const;
- virtual void setCappedCallback(CappedCallback*);
- virtual int64_t storageSize(OperationContext* opCtx,
- BSONObjBuilder* extraInfo = nullptr,
- int infoLevel = 0) const;
-
- virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const;
-
- virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl);
-
- virtual Status insertRecords(OperationContext* opCtx,
- std::vector<Record>* inOutRecords,
- const std::vector<Timestamp>& timestamps);
-
- virtual Status updateRecord(OperationContext* opCtx,
- const RecordId& oldLocation,
- const char* data,
- int len);
-
- virtual bool updateWithDamagesSupported() const;
-
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages);
-
- Status oplogDiskLocRegister(OperationContext* opCtx,
- const Timestamp& opTime,
- bool orderedCommit) override;
-
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
- bool forward) const final;
-
- virtual Status truncate(OperationContext* opCtx);
- StatusWith<int64_t> truncateWithoutUpdatingCount(RecoveryUnit* ru);
-
- virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
-
- virtual void appendCustomStats(OperationContext* opCtx,
- BSONObjBuilder* result,
- double scale) const;
-
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* opCtx,
- const RecordId& startingPosition) const;
-
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override;
-
- virtual void updateStatsAfterRepair(OperationContext* opCtx,
- long long numRecords,
- long long dataSize);
-
-private:
- friend class VisibilityManagerChange;
-
- void _initHighestIdIfNeeded(OperationContext* opCtx);
-
- /**
- * This gets the next (guaranteed) unique record id.
- */
- int64_t _nextRecordId(OperationContext* opCtx);
-
- /**
- * Two helper functions for deleting excess records in capped record stores.
- * The caller should not have an active SizeAdjuster.
- */
- bool _cappedAndNeedDelete(OperationContext* opCtx, StringStore* workingCopy);
- void _cappedDeleteAsNeeded(OperationContext* opCtx, StringStore* workingCopy);
-
- const bool _isCapped;
- const int64_t _cappedMaxSize;
- const int64_t _cappedMaxDocs;
-
- std::string _identStr;
- StringData _ident;
-
- std::string _prefix;
- std::string _postfix;
-
- mutable Mutex _cappedCallbackMutex =
- MONGO_MAKE_LATCH("RecordStore::_cappedCallbackMutex"); // Guards _cappedCallback
- CappedCallback* _cappedCallback;
-
- mutable Mutex _cappedDeleterMutex = MONGO_MAKE_LATCH("RecordStore::_cappedDeleterMutex");
-
- mutable Mutex _initHighestIdMutex = MONGO_MAKE_LATCH("RecordStore::_initHighestIdMutex");
- AtomicWord<long long> _highestRecordId{0};
- AtomicWord<long long> _numRecords{0};
- AtomicWord<long long> _dataSize{0};
-
- std::string generateKey(const uint8_t* key, size_t key_len) const;
-
- bool _isOplog;
- VisibilityManager* _visibilityManager;
-
- /**
- * Automatically adjust the record count and data size based on the size in change of the
- * underlying radix store during the life time of the SizeAdjuster.
- */
- friend class SizeAdjuster;
- class SizeAdjuster {
- public:
- SizeAdjuster(OperationContext* opCtx, RecordStore* rs);
- ~SizeAdjuster();
-
- private:
- OperationContext* const _opCtx;
- RecordStore* const _rs;
- const StringStore* _workingCopy;
- const int64_t _origNumRecords;
- const int64_t _origDataSize;
- };
-
- class Cursor final : public SeekableRecordCursor {
- OperationContext* opCtx;
- const RecordStore& _rs;
- StringStore::const_iterator it;
- boost::optional<std::string> _savedPosition;
- bool _needFirstSeek = true;
- bool _lastMoveWasRestore = false;
- VisibilityManager* _visibilityManager;
- RecordId _oplogVisibility;
-
- public:
- Cursor(OperationContext* opCtx,
- const RecordStore& rs,
- VisibilityManager* visibilityManager);
- boost::optional<Record> next() final;
- boost::optional<Record> seekExact(const RecordId& id) final override;
- void save() final;
- void saveUnpositioned() final override;
- bool restore() final;
- void detachFromOperationContext() final;
- void reattachToOperationContext(OperationContext* opCtx) final;
-
- private:
- bool inPrefix(const std::string& key_string);
- };
-
- class ReverseCursor final : public SeekableRecordCursor {
- OperationContext* opCtx;
- const RecordStore& _rs;
- StringStore::const_reverse_iterator it;
- boost::optional<std::string> _savedPosition;
- bool _needFirstSeek = true;
- bool _lastMoveWasRestore = false;
- VisibilityManager* _visibilityManager;
-
- public:
- ReverseCursor(OperationContext* opCtx,
- const RecordStore& rs,
- VisibilityManager* visibilityManager);
- boost::optional<Record> next() final;
- boost::optional<Record> seekExact(const RecordId& id) final override;
- void save() final;
- void saveUnpositioned() final override;
- bool restore() final;
- void detachFromOperationContext() final;
- void reattachToOperationContext(OperationContext* opCtx) final;
-
- private:
- bool inPrefix(const std::string& key_string);
- };
-};
-
-} // namespace biggie
-} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_record_store_test.cpp b/src/mongo/db/storage/biggie/biggie_record_store_test.cpp
deleted file mode 100644
index e4ecef04da9..00000000000
--- a/src/mongo/db/storage/biggie/biggie_record_store_test.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/biggie/biggie_record_store.h"
-
-#include <memory>
-
-#include "mongo/base/init.h"
-#include "mongo/db/storage/biggie/biggie_kv_engine.h"
-#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
-#include "mongo/db/storage/biggie/store.h"
-#include "mongo/db/storage/record_store_test_harness.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-namespace biggie {
-namespace {
-
-class RecordStoreHarnessHelper final : public ::mongo::RecordStoreHarnessHelper {
- KVEngine _kvEngine{};
- VisibilityManager _visibilityManager;
-
-public:
- RecordStoreHarnessHelper() {}
-
- virtual std::unique_ptr<mongo::RecordStore> newNonCappedRecordStore() {
- return newNonCappedRecordStore("a.b");
- }
-
- virtual std::unique_ptr<mongo::RecordStore> newNonCappedRecordStore(const std::string& ns) {
- return std::make_unique<RecordStore>(ns,
- "ident"_sd /* ident */,
- false /* isCapped */,
- -1 /* cappedMaxSize */,
- -1 /* cappedMaxDocs */,
- nullptr /* cappedCallback */,
- nullptr /* visibilityManager */);
- }
-
- virtual std::unique_ptr<mongo::RecordStore> newCappedRecordStore(int64_t cappedSizeBytes,
- int64_t cappedMaxDocs) {
- return newCappedRecordStore("a.b", cappedSizeBytes, cappedMaxDocs);
- }
-
- virtual std::unique_ptr<mongo::RecordStore> newCappedRecordStore(const std::string& ns,
- int64_t cappedSizeBytes,
- int64_t cappedMaxDocs) final {
- return std::make_unique<RecordStore>(ns,
- "ident"_sd,
- /*isCapped*/ true,
- cappedSizeBytes,
- cappedMaxDocs,
- /*cappedCallback*/ nullptr,
- &_visibilityManager);
- }
-
- std::unique_ptr<mongo::RecoveryUnit> newRecoveryUnit() final {
- return std::make_unique<RecoveryUnit>(&_kvEngine);
- }
-
- bool supportsDocLocking() final {
- return true;
- }
-};
-
-std::unique_ptr<mongo::RecordStoreHarnessHelper> makeBiggieRecordStoreHarnessHelper() {
- return std::make_unique<RecordStoreHarnessHelper>();
-}
-
-MONGO_INITIALIZER(RegisterRecordStoreHarnessFactory)(InitializerContext* const) {
- mongo::registerRecordStoreHarnessHelperFactory(makeBiggieRecordStoreHarnessHelper);
- return Status::OK();
-}
-} // namespace
-} // namespace biggie
-} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_recovery_unit.cpp b/src/mongo/db/storage/biggie/biggie_recovery_unit.cpp
deleted file mode 100644
index 4a0676e4685..00000000000
--- a/src/mongo/db/storage/biggie/biggie_recovery_unit.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
-
-#include "mongo/platform/basic.h"
-
-#include <mutex>
-
-#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
-#include "mongo/db/storage/oplog_hack.h"
-
-namespace mongo {
-namespace biggie {
-
-RecoveryUnit::RecoveryUnit(KVEngine* parentKVEngine, std::function<void()> cb)
- : _waitUntilDurableCallback(cb), _KVEngine(parentKVEngine) {}
-
-RecoveryUnit::~RecoveryUnit() {
- invariant(!_inUnitOfWork(), toString(_getState()));
- _abort();
-}
-
-void RecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
- invariant(!_inUnitOfWork(), toString(_getState()));
- _setState(State::kInactiveInUnitOfWork);
-}
-
-void RecoveryUnit::doCommitUnitOfWork() {
- invariant(_inUnitOfWork(), toString(_getState()));
-
- if (_dirty) {
- invariant(_forked);
- while (true) {
- std::pair<uint64_t, StringStore> masterInfo = _KVEngine->getMasterInfo();
- try {
- _workingCopy.merge3(_mergeBase, masterInfo.second);
- } catch (const merge_conflict_exception&) {
- throw WriteConflictException();
- }
-
- if (_KVEngine->trySwapMaster(_workingCopy, masterInfo.first)) {
- // Merged successfully
- break;
- } else {
- // Retry the merge, but update the mergeBase since some progress was made merging.
- _mergeBase = masterInfo.second;
- }
- }
- _forked = false;
- _dirty = false;
- } else if (_forked) {
- if (kDebugBuild)
- invariant(_mergeBase == _workingCopy);
- }
-
- _setState(State::kCommitting);
- commitRegisteredChanges(boost::none);
- _setState(State::kInactive);
-}
-
-void RecoveryUnit::doAbortUnitOfWork() {
- invariant(_inUnitOfWork(), toString(_getState()));
- _abort();
-}
-
-bool RecoveryUnit::waitUntilDurable(OperationContext* opCtx) {
- invariant(!_inUnitOfWork(), toString(_getState()));
- invariant(!opCtx->lockState()->isLocked() || storageGlobalParams.repair);
- return true; // This is an in-memory storage engine.
-}
-
-void RecoveryUnit::doAbandonSnapshot() {
- invariant(!_inUnitOfWork(), toString(_getState()));
- _forked = false;
- _dirty = false;
-}
-
-bool RecoveryUnit::forkIfNeeded() {
- if (_forked)
- return false;
-
- // Update the copies of the trees when not in a WUOW so cursors can retrieve the latest data.
-
- std::pair<uint64_t, StringStore> masterInfo = _KVEngine->getMasterInfo();
- StringStore master = masterInfo.second;
-
- _mergeBase = master;
- _workingCopy = master;
-
- _forked = true;
- return true;
-}
-
-Status RecoveryUnit::setTimestamp(Timestamp timestamp) {
- auto key = oploghack::keyForOptime(timestamp);
- if (!key.isOK())
- return key.getStatus();
-
- _KVEngine->visibilityManager()->reserveRecord(this, key.getValue());
- return Status::OK();
-}
-void RecoveryUnit::setOrderedCommit(bool orderedCommit) {}
-
-void RecoveryUnit::_abort() {
- _forked = false;
- _dirty = false;
- _setState(State::kAborting);
- abortRegisteredChanges();
- _setState(State::kInactive);
-}
-
-RecoveryUnit* RecoveryUnit::get(OperationContext* opCtx) {
- return checked_cast<biggie::RecoveryUnit*>(opCtx->recoveryUnit());
-}
-} // namespace biggie
-} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_recovery_unit.h b/src/mongo/db/storage/biggie/biggie_recovery_unit.h
deleted file mode 100644
index 7e939f26b56..00000000000
--- a/src/mongo/db/storage/biggie/biggie_recovery_unit.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <functional>
-#include <vector>
-
-#include "mongo/db/record_id.h"
-#include "mongo/db/storage/biggie/biggie_kv_engine.h"
-#include "mongo/db/storage/biggie/store.h"
-#include "mongo/db/storage/recovery_unit.h"
-
-namespace mongo {
-namespace biggie {
-
-class RecoveryUnit : public ::mongo::RecoveryUnit {
-public:
- RecoveryUnit(KVEngine* parentKVEngine, std::function<void()> cb = nullptr);
- ~RecoveryUnit();
-
- void beginUnitOfWork(OperationContext* opCtx) override final;
-
- bool inActiveTxn() const {
- return _inUnitOfWork();
- }
-
- virtual bool waitUntilDurable(OperationContext* opCtx) override;
-
- virtual void setOrderedCommit(bool orderedCommit) override;
-
- Status setTimestamp(Timestamp timestamp) override;
-
- // Biggie specific function declarations below.
- StringStore* getHead() {
- forkIfNeeded();
- return &_workingCopy;
- }
-
- inline void makeDirty() {
- _dirty = true;
- }
-
- /**
- * Checks if there already exists a current working copy and merge base; if not fetches
- * one and creates them.
- */
- bool forkIfNeeded();
-
- static RecoveryUnit* get(OperationContext* opCtx);
-
-private:
- void doCommitUnitOfWork() override final;
-
- void doAbortUnitOfWork() override final;
-
- void doAbandonSnapshot() override final;
-
- void _abort();
-
- std::function<void()> _waitUntilDurableCallback;
- // Official master is kept by KVEngine
- KVEngine* _KVEngine;
- StringStore _mergeBase;
- StringStore _workingCopy;
-
- bool _forked = false;
- bool _dirty = false; // Whether or not we have written to this _workingCopy.
-};
-
-} // namespace biggie
-} // namespace mongo
diff --git a/src/mongo/db/storage/devnull/SConscript b/src/mongo/db/storage/devnull/SConscript
index f2963e59380..0214638cd13 100644
--- a/src/mongo/db/storage/devnull/SConscript
+++ b/src/mongo/db/storage/devnull/SConscript
@@ -8,10 +8,12 @@ env.Library(
target='storage_devnull_core',
source=[
'devnull_kv_engine.cpp',
+ 'ephemeral_catalog_record_store.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store',
'$BUILD_DIR/mongo/db/storage/kv/kv_prefix',
+ '$BUILD_DIR/mongo/db/storage/oplog_hack',
+ '$BUILD_DIR/mongo/db/storage/recovery_unit_base',
],
)
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index f70b7ad94fd..e3fd19d320d 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -33,7 +33,7 @@
#include <memory>
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h"
+#include "mongo/db/storage/devnull/ephemeral_catalog_record_store.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/sorted_data_interface.h"
diff --git a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp
new file mode 100644
index 00000000000..60029b991a7
--- /dev/null
+++ b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp
@@ -0,0 +1,593 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
+
+#include "mongo/db/storage/devnull/ephemeral_catalog_record_store.h"
+
+#include <memory>
+
+#include "mongo/db/jsobj.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/storage/oplog_hack.h"
+#include "mongo/db/storage/recovery_unit.h"
+#include "mongo/logv2/log.h"
+#include "mongo/util/str.h"
+#include "mongo/util/unowned_ptr.h"
+
+namespace mongo {
+
+using std::shared_ptr;
+
+class EphemeralForTestRecordStore::InsertChange : public RecoveryUnit::Change {
+public:
+ InsertChange(OperationContext* opCtx, Data* data, RecordId loc)
+ : _opCtx(opCtx), _data(data), _loc(loc) {}
+ virtual void commit(boost::optional<Timestamp>) {}
+ virtual void rollback() {
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+
+ Records::iterator it = _data->records.find(_loc);
+ if (it != _data->records.end()) {
+ _data->dataSize -= it->second.size;
+ _data->records.erase(it);
+ }
+ }
+
+private:
+ OperationContext* _opCtx;
+ Data* const _data;
+ const RecordId _loc;
+};
+
+// Works for both removes and updates
+class EphemeralForTestRecordStore::RemoveChange : public RecoveryUnit::Change {
+public:
+ RemoveChange(OperationContext* opCtx,
+ Data* data,
+ RecordId loc,
+ const EphemeralForTestRecord& rec)
+ : _opCtx(opCtx), _data(data), _loc(loc), _rec(rec) {}
+
+ virtual void commit(boost::optional<Timestamp>) {}
+ virtual void rollback() {
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+
+ Records::iterator it = _data->records.find(_loc);
+ if (it != _data->records.end()) {
+ _data->dataSize -= it->second.size;
+ }
+
+ _data->dataSize += _rec.size;
+ _data->records[_loc] = _rec;
+ }
+
+private:
+ OperationContext* _opCtx;
+ Data* const _data;
+ const RecordId _loc;
+ const EphemeralForTestRecord _rec;
+};
+
+class EphemeralForTestRecordStore::TruncateChange : public RecoveryUnit::Change {
+public:
+ TruncateChange(OperationContext* opCtx, Data* data) : _opCtx(opCtx), _data(data), _dataSize(0) {
+ using std::swap;
+
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ swap(_dataSize, _data->dataSize);
+ swap(_records, _data->records);
+ }
+
+ virtual void commit(boost::optional<Timestamp>) {}
+ virtual void rollback() {
+ using std::swap;
+
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ swap(_dataSize, _data->dataSize);
+ swap(_records, _data->records);
+ }
+
+private:
+ OperationContext* _opCtx;
+ Data* const _data;
+ int64_t _dataSize;
+ Records _records;
+};
+
+class EphemeralForTestRecordStore::Cursor final : public SeekableRecordCursor {
+public:
+ Cursor(OperationContext* opCtx, const EphemeralForTestRecordStore& rs)
+ : _records(rs._data->records), _isCapped(rs.isCapped()) {}
+
+ boost::optional<Record> next() final {
+ if (_needFirstSeek) {
+ _needFirstSeek = false;
+ _it = _records.begin();
+ } else if (!_lastMoveWasRestore && _it != _records.end()) {
+ ++_it;
+ }
+ _lastMoveWasRestore = false;
+
+ if (_it == _records.end())
+ return {};
+ return {{_it->first, _it->second.toRecordData()}};
+ }
+
+ boost::optional<Record> seekExact(const RecordId& id) final {
+ _lastMoveWasRestore = false;
+ _needFirstSeek = false;
+ _it = _records.find(id);
+ if (_it == _records.end())
+ return {};
+ return {{_it->first, _it->second.toRecordData()}};
+ }
+
+ void save() final {
+ if (!_needFirstSeek && !_lastMoveWasRestore)
+ _savedId = _it == _records.end() ? RecordId() : _it->first;
+ }
+
+ void saveUnpositioned() final {
+ _savedId = RecordId();
+ }
+
+ bool restore() final {
+ if (_savedId.isNull()) {
+ _it = _records.end();
+ return true;
+ }
+
+ _it = _records.lower_bound(_savedId);
+ _lastMoveWasRestore = _it == _records.end() || _it->first != _savedId;
+
+ // Capped iterators die on invalidation rather than advancing.
+ return !(_isCapped && _lastMoveWasRestore);
+ }
+
+ void detachFromOperationContext() final {}
+ void reattachToOperationContext(OperationContext* opCtx) final {}
+
+private:
+ Records::const_iterator _it;
+ bool _needFirstSeek = true;
+ bool _lastMoveWasRestore = false;
+ RecordId _savedId; // Location to restore() to. Null means EOF.
+
+ const EphemeralForTestRecordStore::Records& _records;
+ const bool _isCapped;
+};
+
+class EphemeralForTestRecordStore::ReverseCursor final : public SeekableRecordCursor {
+public:
+ ReverseCursor(OperationContext* opCtx, const EphemeralForTestRecordStore& rs)
+ : _records(rs._data->records), _isCapped(rs.isCapped()) {}
+
+ boost::optional<Record> next() final {
+ if (_needFirstSeek) {
+ _needFirstSeek = false;
+ _it = _records.rbegin();
+ } else if (!_lastMoveWasRestore && _it != _records.rend()) {
+ ++_it;
+ }
+ _lastMoveWasRestore = false;
+
+ if (_it == _records.rend())
+ return {};
+ return {{_it->first, _it->second.toRecordData()}};
+ }
+
+ boost::optional<Record> seekExact(const RecordId& id) final {
+ _lastMoveWasRestore = false;
+ _needFirstSeek = false;
+
+ auto forwardIt = _records.find(id);
+ if (forwardIt == _records.end()) {
+ _it = _records.rend();
+ return {};
+ }
+
+ // The reverse_iterator will point to the preceding element, so increment the base
+ // iterator to make it point past the found element.
+ ++forwardIt;
+ _it = Records::const_reverse_iterator(forwardIt);
+ dassert(_it != _records.rend());
+ dassert(_it->first == id);
+ return {{_it->first, _it->second.toRecordData()}};
+ }
+
+ void save() final {
+ if (!_needFirstSeek && !_lastMoveWasRestore)
+ _savedId = _it == _records.rend() ? RecordId() : _it->first;
+ }
+
+ void saveUnpositioned() final {
+ _savedId = RecordId();
+ }
+
+ bool restore() final {
+ if (_savedId.isNull()) {
+ _it = _records.rend();
+ return true;
+ }
+
+ // Note: upper_bound returns the first entry > _savedId and reverse_iterators
+ // dereference to the element before their base iterator. This combine to make this
+ // dereference to the first element <= _savedId which is what we want here.
+ _it = Records::const_reverse_iterator(_records.upper_bound(_savedId));
+ _lastMoveWasRestore = _it == _records.rend() || _it->first != _savedId;
+
+ // Capped iterators die on invalidation rather than advancing.
+ return !(_isCapped && _lastMoveWasRestore);
+ }
+
+ void detachFromOperationContext() final {}
+ void reattachToOperationContext(OperationContext* opCtx) final {}
+
+private:
+ Records::const_reverse_iterator _it;
+ bool _needFirstSeek = true;
+ bool _lastMoveWasRestore = false;
+ RecordId _savedId; // Location to restore() to. Null means EOF.
+ const EphemeralForTestRecordStore::Records& _records;
+ const bool _isCapped;
+};
+
+
+//
+// RecordStore
+//
+
+EphemeralForTestRecordStore::EphemeralForTestRecordStore(StringData ns,
+ std::shared_ptr<void>* dataInOut,
+ bool isCapped,
+ int64_t cappedMaxSize,
+ int64_t cappedMaxDocs,
+ CappedCallback* cappedCallback)
+ : RecordStore(ns),
+ _isCapped(isCapped),
+ _cappedMaxSize(cappedMaxSize),
+ _cappedMaxDocs(cappedMaxDocs),
+ _cappedCallback(cappedCallback),
+ _data(*dataInOut ? static_cast<Data*>(dataInOut->get())
+ : new Data(ns, NamespaceString::oplog(ns))) {
+ if (!*dataInOut) {
+ dataInOut->reset(_data); // takes ownership
+ }
+
+ if (_isCapped) {
+ invariant(_cappedMaxSize > 0);
+ invariant(_cappedMaxDocs == -1 || _cappedMaxDocs > 0);
+ } else {
+ invariant(_cappedMaxSize == -1);
+ invariant(_cappedMaxDocs == -1);
+ }
+}
+
+const char* EphemeralForTestRecordStore::name() const {
+ return "EphemeralForTest";
+}
+
+RecordData EphemeralForTestRecordStore::dataFor(OperationContext* opCtx,
+ const RecordId& loc) const {
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ return recordFor(lock, loc)->toRecordData();
+}
+
+const EphemeralForTestRecordStore::EphemeralForTestRecord* EphemeralForTestRecordStore::recordFor(
+ WithLock, const RecordId& loc) const {
+ Records::const_iterator it = _data->records.find(loc);
+ if (it == _data->records.end()) {
+ LOGV2_ERROR(23720,
+ "EphemeralForTestRecordStore::recordFor cannot find record for {ns}:{loc}",
+ "ns"_attr = ns(),
+ "loc"_attr = loc);
+ }
+ invariant(it != _data->records.end());
+ return &it->second;
+}
+
+EphemeralForTestRecordStore::EphemeralForTestRecord* EphemeralForTestRecordStore::recordFor(
+ WithLock, const RecordId& loc) {
+ Records::iterator it = _data->records.find(loc);
+ if (it == _data->records.end()) {
+ LOGV2_ERROR(23721,
+ "EphemeralForTestRecordStore::recordFor cannot find record for {ns}:{loc}",
+ "ns"_attr = ns(),
+ "loc"_attr = loc);
+ }
+ invariant(it != _data->records.end());
+ return &it->second;
+}
+
+bool EphemeralForTestRecordStore::findRecord(OperationContext* opCtx,
+ const RecordId& loc,
+ RecordData* rd) const {
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ Records::const_iterator it = _data->records.find(loc);
+ if (it == _data->records.end()) {
+ return false;
+ }
+ *rd = it->second.toRecordData();
+ return true;
+}
+
+void EphemeralForTestRecordStore::deleteRecord(OperationContext* opCtx, const RecordId& loc) {
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+
+ deleteRecord(lock, opCtx, loc);
+}
+
+void EphemeralForTestRecordStore::deleteRecord(WithLock lk,
+ OperationContext* opCtx,
+ const RecordId& loc) {
+ EphemeralForTestRecord* rec = recordFor(lk, loc);
+ opCtx->recoveryUnit()->registerChange(std::make_unique<RemoveChange>(opCtx, _data, loc, *rec));
+ _data->dataSize -= rec->size;
+ invariant(_data->records.erase(loc) == 1);
+}
+
+bool EphemeralForTestRecordStore::cappedAndNeedDelete(WithLock, OperationContext* opCtx) const {
+ if (!_isCapped)
+ return false;
+
+ if (_data->dataSize > _cappedMaxSize)
+ return true;
+
+ if ((_cappedMaxDocs != -1) && (numRecords(opCtx) > _cappedMaxDocs))
+ return true;
+
+ return false;
+}
+
+void EphemeralForTestRecordStore::cappedDeleteAsNeeded(WithLock lk, OperationContext* opCtx) {
+ while (cappedAndNeedDelete(lk, opCtx)) {
+ invariant(!_data->records.empty());
+
+ Records::iterator oldest = _data->records.begin();
+ RecordId id = oldest->first;
+ RecordData data = oldest->second.toRecordData();
+
+ if (_cappedCallback)
+ uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, id, data));
+
+ deleteRecord(lk, opCtx, id);
+ }
+}
+
+StatusWith<RecordId> EphemeralForTestRecordStore::extractAndCheckLocForOplog(WithLock,
+ const char* data,
+ int len) const {
+ StatusWith<RecordId> status = oploghack::extractKey(data, len);
+ if (!status.isOK())
+ return status;
+
+ if (!_data->records.empty() && status.getValue() <= _data->records.rbegin()->first) {
+
+ return StatusWith<RecordId>(ErrorCodes::BadValue,
+ str::stream() << "attempted out-of-order oplog insert of "
+ << status.getValue() << " (oplog last insert was "
+ << _data->records.rbegin()->first << " )");
+ }
+ return status;
+}
+
+Status EphemeralForTestRecordStore::insertRecords(OperationContext* opCtx,
+ std::vector<Record>* inOutRecords,
+ const std::vector<Timestamp>& timestamps) {
+
+ for (auto& record : *inOutRecords) {
+ if (_isCapped && record.data.size() > _cappedMaxSize) {
+ // We use dataSize for capped rollover and we don't want to delete everything if we know
+ // this won't fit.
+ return Status(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
+ }
+ }
+ const auto insertSingleFn = [this, opCtx](Record* record) {
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ EphemeralForTestRecord rec(record->data.size());
+ memcpy(rec.data.get(), record->data.data(), record->data.size());
+
+ RecordId loc;
+ if (_data->isOplog) {
+ StatusWith<RecordId> status =
+ extractAndCheckLocForOplog(lock, record->data.data(), record->data.size());
+ if (!status.isOK())
+ return status.getStatus();
+ loc = status.getValue();
+ } else {
+ loc = allocateLoc(lock);
+ }
+
+ _data->dataSize += record->data.size();
+ _data->records[loc] = rec;
+ record->id = loc;
+
+ opCtx->recoveryUnit()->registerChange(std::make_unique<InsertChange>(opCtx, _data, loc));
+ cappedDeleteAsNeeded(lock, opCtx);
+
+ return Status::OK();
+ };
+
+ for (auto& record : *inOutRecords) {
+ auto status = insertSingleFn(&record);
+ if (!status.isOK())
+ return status;
+ }
+
+ return Status::OK();
+}
+
+Status EphemeralForTestRecordStore::updateRecord(OperationContext* opCtx,
+ const RecordId& loc,
+ const char* data,
+ int len) {
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ EphemeralForTestRecord* oldRecord = recordFor(lock, loc);
+ int oldLen = oldRecord->size;
+
+ // Documents in capped collections cannot change size. We check that above the storage layer.
+ invariant(!_isCapped || len == oldLen);
+
+ EphemeralForTestRecord newRecord(len);
+ memcpy(newRecord.data.get(), data, len);
+
+ opCtx->recoveryUnit()->registerChange(
+ std::make_unique<RemoveChange>(opCtx, _data, loc, *oldRecord));
+ _data->dataSize += len - oldLen;
+ *oldRecord = newRecord;
+
+ cappedDeleteAsNeeded(lock, opCtx);
+ return Status::OK();
+}
+
+bool EphemeralForTestRecordStore::updateWithDamagesSupported() const {
+ return true;
+}
+
+StatusWith<RecordData> EphemeralForTestRecordStore::updateWithDamages(
+ OperationContext* opCtx,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages) {
+
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+
+ EphemeralForTestRecord* oldRecord = recordFor(lock, loc);
+ const int len = oldRecord->size;
+
+ EphemeralForTestRecord newRecord(len);
+ memcpy(newRecord.data.get(), oldRecord->data.get(), len);
+
+ opCtx->recoveryUnit()->registerChange(
+ std::make_unique<RemoveChange>(opCtx, _data, loc, *oldRecord));
+ *oldRecord = newRecord;
+
+ cappedDeleteAsNeeded(lock, opCtx);
+
+ char* root = newRecord.data.get();
+ mutablebson::DamageVector::const_iterator where = damages.begin();
+ const mutablebson::DamageVector::const_iterator end = damages.end();
+ for (; where != end; ++where) {
+ const char* sourcePtr = damageSource + where->sourceOffset;
+ char* targetPtr = root + where->targetOffset;
+ std::memcpy(targetPtr, sourcePtr, where->size);
+ }
+
+ *oldRecord = newRecord;
+
+ return newRecord.toRecordData();
+}
+
+std::unique_ptr<SeekableRecordCursor> EphemeralForTestRecordStore::getCursor(
+ OperationContext* opCtx, bool forward) const {
+ if (forward)
+ return std::make_unique<Cursor>(opCtx, *this);
+ return std::make_unique<ReverseCursor>(opCtx, *this);
+}
+
+Status EphemeralForTestRecordStore::truncate(OperationContext* opCtx) {
+ // Unlike other changes, TruncateChange mutates _data on construction to perform the
+ // truncate
+ opCtx->recoveryUnit()->registerChange(std::make_unique<TruncateChange>(opCtx, _data));
+ return Status::OK();
+}
+
+void EphemeralForTestRecordStore::cappedTruncateAfter(OperationContext* opCtx,
+ RecordId end,
+ bool inclusive) {
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ Records::iterator it =
+ inclusive ? _data->records.lower_bound(end) : _data->records.upper_bound(end);
+ while (it != _data->records.end()) {
+ RecordId id = it->first;
+ EphemeralForTestRecord record = it->second;
+
+ if (_cappedCallback) {
+ uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, id, record.toRecordData()));
+ }
+
+ opCtx->recoveryUnit()->registerChange(
+ std::make_unique<RemoveChange>(opCtx, _data, id, record));
+ _data->dataSize -= record.size;
+ _data->records.erase(it++);
+ }
+}
+
+void EphemeralForTestRecordStore::appendCustomStats(OperationContext* opCtx,
+ BSONObjBuilder* result,
+ double scale) const {
+ result->appendBool("capped", _isCapped);
+ if (_isCapped) {
+ result->appendIntOrLL("max", _cappedMaxDocs);
+ result->appendIntOrLL("maxSize", _cappedMaxSize / scale);
+ }
+}
+
+int64_t EphemeralForTestRecordStore::storageSize(OperationContext* opCtx,
+ BSONObjBuilder* extraInfo,
+ int infoLevel) const {
+ // Note: not making use of extraInfo or infoLevel since we don't have extents
+ const int64_t recordOverhead = numRecords(opCtx) * sizeof(EphemeralForTestRecord);
+ return _data->dataSize + recordOverhead;
+}
+
+RecordId EphemeralForTestRecordStore::allocateLoc(WithLock) {
+ RecordId out = RecordId(_data->nextId++);
+ invariant(out.isNormal());
+ return out;
+}
+
+boost::optional<RecordId> EphemeralForTestRecordStore::oplogStartHack(
+ OperationContext* opCtx, const RecordId& startingPosition) const {
+ if (!_data->isOplog)
+ return boost::none;
+
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ const Records& records = _data->records;
+
+ if (records.empty())
+ return RecordId();
+
+ Records::const_iterator it = records.lower_bound(startingPosition);
+ if (it == records.end() || it->first > startingPosition) {
+ // If the startingPosition is before the oldest oplog entry, this ensures that we return
+ // RecordId() as specified in record_store.h.
+ if (it == records.begin()) {
+ return RecordId();
+ }
+ --it;
+ }
+
+ return it->first;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
new file mode 100644
index 00000000000..a29bc0fa014
--- /dev/null
+++ b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
@@ -0,0 +1,189 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <boost/shared_array.hpp>
+#include <map>
+
+#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/storage/capped_callback.h"
+#include "mongo/db/storage/record_store.h"
+#include "mongo/platform/mutex.h"
+#include "mongo/util/concurrency/with_lock.h"
+
+
+namespace mongo {
+
+/**
+ * A RecordStore that stores all data in-memory.
+ *
+ * @param cappedMaxSize - required if isCapped. limit uses dataSize() in this impl.
+ */
+class EphemeralForTestRecordStore : public RecordStore {
+public:
+ explicit EphemeralForTestRecordStore(StringData ns,
+ std::shared_ptr<void>* dataInOut,
+ bool isCapped = false,
+ int64_t cappedMaxSize = -1,
+ int64_t cappedMaxDocs = -1,
+ CappedCallback* cappedCallback = nullptr);
+
+ virtual const char* name() const;
+
+ const std::string& getIdent() const override {
+ return ns();
+ }
+
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const;
+
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const;
+
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl);
+
+ virtual Status insertRecords(OperationContext* opCtx,
+ std::vector<Record>* inOutRecords,
+ const std::vector<Timestamp>& timestamps);
+
+ virtual Status updateRecord(OperationContext* opCtx,
+ const RecordId& oldLocation,
+ const char* data,
+ int len);
+
+ virtual bool updateWithDamagesSupported() const;
+
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages);
+
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
+ bool forward) const final;
+
+ virtual Status truncate(OperationContext* opCtx);
+
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
+
+ virtual void appendCustomStats(OperationContext* opCtx,
+ BSONObjBuilder* result,
+ double scale) const;
+
+ virtual int64_t storageSize(OperationContext* opCtx,
+ BSONObjBuilder* extraInfo = nullptr,
+ int infoLevel = 0) const;
+
+ virtual long long dataSize(OperationContext* opCtx) const {
+ return _data->dataSize;
+ }
+
+ virtual long long numRecords(OperationContext* opCtx) const {
+ return _data->records.size();
+ }
+
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* opCtx,
+ const RecordId& startingPosition) const;
+
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {}
+
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
+ long long numRecords,
+ long long dataSize) {
+ stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ invariant(_data->records.size() == size_t(numRecords));
+ _data->dataSize = dataSize;
+ }
+
+protected:
+ struct EphemeralForTestRecord {
+ EphemeralForTestRecord() : size(0) {}
+ EphemeralForTestRecord(int size) : size(size), data(new char[size]) {}
+
+ RecordData toRecordData() const {
+ return RecordData(data.get(), size);
+ }
+
+ int size;
+ boost::shared_array<char> data;
+ };
+
+ virtual const EphemeralForTestRecord* recordFor(WithLock, const RecordId& loc) const;
+ virtual EphemeralForTestRecord* recordFor(WithLock, const RecordId& loc);
+
+public:
+ //
+ // Not in RecordStore interface
+ //
+
+ typedef std::map<RecordId, EphemeralForTestRecord> Records;
+
+ bool isCapped() const {
+ return _isCapped;
+ }
+ void setCappedCallback(CappedCallback* cb) {
+ _cappedCallback = cb;
+ }
+
+private:
+ class InsertChange;
+ class RemoveChange;
+ class TruncateChange;
+
+ class Cursor;
+ class ReverseCursor;
+
+ StatusWith<RecordId> extractAndCheckLocForOplog(WithLock, const char* data, int len) const;
+
+ RecordId allocateLoc(WithLock);
+ bool cappedAndNeedDelete(WithLock, OperationContext* opCtx) const;
+ void cappedDeleteAsNeeded(WithLock lk, OperationContext* opCtx);
+ void deleteRecord(WithLock lk, OperationContext* opCtx, const RecordId& dl);
+
+ // TODO figure out a proper solution to metadata
+ const bool _isCapped;
+ const int64_t _cappedMaxSize;
+ const int64_t _cappedMaxDocs;
+ CappedCallback* _cappedCallback;
+
+ // This is the "persistent" data.
+ struct Data {
+ Data(StringData ns, bool isOplog)
+ : dataSize(0), recordsMutex(), nextId(1), isOplog(isOplog) {}
+
+ int64_t dataSize;
+ stdx::recursive_mutex recordsMutex;
+ Records records;
+ int64_t nextId;
+ const bool isOplog;
+ };
+
+ Data* const _data;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/SConscript b/src/mongo/db/storage/ephemeral_for_test/SConscript
index 966fd417d77..e88fa58bc10 100644
--- a/src/mongo/db/storage/ephemeral_for_test/SConscript
+++ b/src/mongo/db/storage/ephemeral_for_test/SConscript
@@ -1,73 +1,72 @@
-# -*- mode: python -*-
+# -*- mode: python; -*-
+
Import("env")
env = env.Clone()
env.Library(
- target= 'ephemeral_for_test_record_store',
- source= [
- 'ephemeral_for_test_record_store.cpp'
- ],
- LIBDEPS= [
- '$BUILD_DIR/mongo/base',
- '$BUILD_DIR/mongo/db/concurrency/lock_manager',
- '$BUILD_DIR/mongo/db/storage/oplog_hack',
- ]
- )
-
-env.Library(
- target= 'storage_ephemeral_for_test_core',
- source= [
- 'ephemeral_for_test_btree_impl.cpp',
- 'ephemeral_for_test_engine.cpp',
+ target='storage_ephemeral_for_test_core',
+ source=[
+ 'ephemeral_for_test_kv_engine.cpp',
+ 'ephemeral_for_test_record_store.cpp',
'ephemeral_for_test_recovery_unit.cpp',
- ],
- LIBDEPS= [
- 'ephemeral_for_test_record_store',
+ 'ephemeral_for_test_sorted_impl.cpp',
+ 'ephemeral_for_test_visibility_manager.cpp',
+ ],
+ LIBDEPS=[
'$BUILD_DIR/mongo/base',
- '$BUILD_DIR/mongo/db/namespace_string',
- '$BUILD_DIR/mongo/db/catalog/collection_options',
- '$BUILD_DIR/mongo/db/index/index_descriptor',
- '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
+ '$BUILD_DIR/mongo/db/concurrency/write_conflict_exception',
'$BUILD_DIR/mongo/db/storage/index_entry_comparison',
- '$BUILD_DIR/mongo/db/storage/key_string',
- '$BUILD_DIR/mongo/db/storage/recovery_unit_base',
'$BUILD_DIR/mongo/db/storage/kv/kv_prefix',
- ]
- )
+ '$BUILD_DIR/mongo/db/storage/recovery_unit_base',
+ ],
+ LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/storage/key_string',
+ '$BUILD_DIR/mongo/db/storage/oplog_hack',
+ '$BUILD_DIR/mongo/db/storage/write_unit_of_work',
+ '$BUILD_DIR/mongo/db/commands/server_status',
+ ],
+)
env.Library(
- target= 'storage_ephemeral_for_test',
- source= [
+ target='storage_ephemeral_for_test',
+ source=[
'ephemeral_for_test_init.cpp',
+ 'ephemeral_for_test_server_status.cpp',
],
- LIBDEPS= [
- 'storage_ephemeral_for_test_core',
+ LIBDEPS=[
'$BUILD_DIR/mongo/db/storage/durable_catalog_impl',
'$BUILD_DIR/mongo/db/storage/storage_engine_impl',
+ 'storage_ephemeral_for_test_core',
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/storage/storage_engine_common',
+ '$BUILD_DIR/mongo/db/commands/server_status',
],
)
+# Testing
env.CppUnitTest(
- target='storage_ephemeral_for_test_test',
- source=[
- 'ephemeral_for_test_btree_impl_test.cpp',
- 'ephemeral_for_test_engine_test.cpp',
- 'ephemeral_for_test_record_store_test.cpp',
- ],
- LIBDEPS=[
- 'storage_ephemeral_for_test_core',
- '$BUILD_DIR/mongo/db/storage/kv/kv_engine_test_harness',
- '$BUILD_DIR/mongo/db/storage/record_store_test_harness',
- '$BUILD_DIR/mongo/db/storage/sorted_data_interface_test_harness',
- '$BUILD_DIR/mongo/db/storage/storage_options',
- ],
- LIBDEPS_PRIVATE=[
- '$BUILD_DIR/mongo/db/auth/authmocks',
- '$BUILD_DIR/mongo/db/repl/replmocks',
- '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
- ],
+ target='storage_ephemeral_for_test_test',
+ source=[
+ 'ephemeral_for_test_kv_engine_test.cpp',
+ 'ephemeral_for_test_record_store_test.cpp',
+ 'ephemeral_for_test_recovery_unit_test.cpp',
+ 'ephemeral_for_test_radix_store_test.cpp',
+ 'ephemeral_for_test_radix_store_concurrent_test.cpp',
+ 'ephemeral_for_test_sorted_impl_test.cpp',
+ ],
+ LIBDEPS=[
+ 'storage_ephemeral_for_test_core',
+ '$BUILD_DIR/mongo/db/auth/authmocks',
+ '$BUILD_DIR/mongo/db/common',
+ '$BUILD_DIR/mongo/db/index/index_descriptor',
+ '$BUILD_DIR/mongo/db/repl/replmocks',
+ '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
+ '$BUILD_DIR/mongo/db/storage/key_string',
+ '$BUILD_DIR/mongo/db/storage/kv/kv_engine_test_harness',
+ '$BUILD_DIR/mongo/db/storage/record_store_test_harness',
+ '$BUILD_DIR/mongo/db/storage/recovery_unit_test_harness',
+ '$BUILD_DIR/mongo/db/storage/sorted_data_interface_test_harness',
+ ],
)
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
deleted file mode 100644
index f3160e96033..00000000000
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
+++ /dev/null
@@ -1,657 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.h"
-
-#include <memory>
-#include <set>
-
-#include "mongo/db/catalog/index_catalog_entry.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
-#include "mongo/db/storage/index_entry_comparison.h"
-#include "mongo/db/storage/key_string.h"
-#include "mongo/util/str.h"
-
-namespace mongo {
-
-using std::shared_ptr;
-using std::string;
-using std::vector;
-
-namespace {
-
-typedef std::set<IndexKeyEntry, IndexEntryComparison> IndexSet;
-
-bool keyExists(const IndexSet& data, const BSONObj& key) {
- IndexSet::const_iterator it = data.find(IndexKeyEntry(key, RecordId()));
- return it != data.end();
-}
-
-bool isDup(const IndexSet& data, const BSONObj& key) {
- IndexSet::const_iterator it = data.find(IndexKeyEntry(key, RecordId()));
- if (it == data.end())
- return false;
-
- ++it;
- if (it == data.end())
- return false;
-
- return it->key.woCompare(key, BSONObj(), false) == 0;
-}
-
-class EphemeralForTestBtreeBuilderImpl : public SortedDataBuilderInterface {
-public:
- EphemeralForTestBtreeBuilderImpl(IndexSet* data,
- long long* currentKeySize,
- const Ordering& ordering,
- bool dupsAllowed,
- const NamespaceString& collectionNamespace,
- const std::string& indexName,
- const BSONObj& keyPattern,
- const BSONObj& collation)
- : _data(data),
- _currentKeySize(currentKeySize),
- _ordering(ordering),
- _dupsAllowed(dupsAllowed),
- _comparator(_data->key_comp()),
- _collectionNamespace(collectionNamespace),
- _indexName(indexName),
- _keyPattern(keyPattern),
- _collation(collation) {
- invariant(_data->empty());
- }
-
- Status _addKey(const BSONObj& key, const RecordId& loc) {
- // inserts should be in ascending (key, RecordId) order.
-
- invariant(loc.isValid());
- invariant(!key.hasFieldNames());
-
- if (!_data->empty()) {
- // Compare specified key with last inserted key, ignoring its RecordId
- int cmp = _comparator.compare(IndexKeyEntry(key, RecordId()), *_last);
- if (cmp < 0 || (_dupsAllowed && cmp == 0 && loc < _last->loc)) {
- return Status(ErrorCodes::InternalError,
- "expected ascending (key, RecordId) order in bulk builder");
- } else if (!_dupsAllowed && cmp == 0 && loc != _last->loc) {
- return buildDupKeyErrorStatus(
- key, _collectionNamespace, _indexName, _keyPattern, _collation);
- }
- }
-
- BSONObj owned = key.getOwned();
- _last = _data->insert(_data->end(), IndexKeyEntry(owned, loc));
- *_currentKeySize += key.objsize();
-
- return Status::OK();
- }
-
- Status addKey(const KeyString::Value& keyString) {
- dassert(
- KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()).isValid());
- RecordId loc = KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
-
- auto key = KeyString::toBson(keyString, _ordering);
-
- return _addKey(key, loc);
- }
-
-private:
- IndexSet* const _data;
- long long* _currentKeySize;
- const Ordering& _ordering;
- const bool _dupsAllowed;
-
- IndexEntryComparison _comparator; // used by the bulk builder to detect duplicate keys
- IndexSet::const_iterator _last; // or (key, RecordId) ordering violations
-
- const NamespaceString _collectionNamespace;
- const std::string _indexName;
- const BSONObj _keyPattern;
- const BSONObj _collation;
-};
-
-class EphemeralForTestBtreeImpl : public SortedDataInterface {
-public:
- EphemeralForTestBtreeImpl(IndexSet* data,
- const Ordering& ordering,
- bool isUnique,
- const NamespaceString& collectionNamespace,
- const std::string& indexName,
- const BSONObj& keyPattern,
- const BSONObj& collation)
- : SortedDataInterface(KeyString::Version::kLatestVersion, ordering),
- _data(data),
- _isUnique(isUnique),
- _collectionNamespace(collectionNamespace),
- _indexName(indexName),
- _keyPattern(keyPattern),
- _collation(collation) {
- _currentKeySize = 0;
- }
-
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) {
- return new EphemeralForTestBtreeBuilderImpl(_data,
- &_currentKeySize,
- _ordering,
- dupsAllowed,
- _collectionNamespace,
- _indexName,
- _keyPattern,
- _collation);
- }
-
- virtual Status insert(OperationContext* opCtx,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) {
- invariant(loc.isValid());
- invariant(!key.hasFieldNames());
-
-
- // TODO optimization: save the iterator from the dup-check to speed up insert
- if (!dupsAllowed && keyExists(*_data, key))
- return buildDupKeyErrorStatus(
- key, _collectionNamespace, _indexName, _keyPattern, _collation);
-
- IndexKeyEntry entry(key.getOwned(), loc);
- if (_data->insert(entry).second) {
- _currentKeySize += key.objsize();
- opCtx->recoveryUnit()->registerChange(
- std::make_unique<IndexChange>(_data, entry, true));
- }
- return Status::OK();
- }
-
- virtual Status insert(OperationContext* opCtx,
- const KeyString::Value& keyString,
- bool dupsAllowed) {
- RecordId loc = KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
-
- auto key = KeyString::toBson(keyString, _ordering);
-
- return insert(opCtx, key, loc, dupsAllowed);
- }
-
- virtual void unindex(OperationContext* opCtx,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) {
- invariant(loc.isValid());
- invariant(!key.hasFieldNames());
-
- IndexKeyEntry entry(key.getOwned(), loc);
- const size_t numDeleted = _data->erase(entry);
- invariant(numDeleted <= 1);
- if (numDeleted == 1) {
- _currentKeySize -= key.objsize();
- opCtx->recoveryUnit()->registerChange(
- std::make_unique<IndexChange>(_data, entry, false));
- }
- }
-
- virtual void unindex(OperationContext* opCtx,
- const KeyString::Value& keyString,
- bool dupsAllowed) {
- RecordId loc = KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
-
- auto key = KeyString::toBson(keyString, _ordering);
-
- return unindex(opCtx, key, loc, dupsAllowed);
- }
-
- virtual void fullValidate(OperationContext* opCtx,
- long long* numKeysOut,
- ValidateResults* fullResults) const {
- // TODO check invariants?
- *numKeysOut = _data->size();
- }
-
- virtual bool appendCustomStats(OperationContext* opCtx,
- BSONObjBuilder* output,
- double scale) const {
- return false;
- }
-
- virtual long long getSpaceUsedBytes(OperationContext* opCtx) const {
- return _currentKeySize + (sizeof(IndexKeyEntry) * _data->size());
- }
-
- Status _dupKeyCheck(OperationContext* opCtx, const BSONObj& key) {
- invariant(!key.hasFieldNames());
- if (isDup(*_data, key))
- return buildDupKeyErrorStatus(
- key, _collectionNamespace, _indexName, _keyPattern, _collation);
- return Status::OK();
- }
-
- virtual Status dupKeyCheck(OperationContext* opCtx, const KeyString::Value& keyString) {
- const BSONObj key = KeyString::toBson(
- keyString.getBuffer(), keyString.getSize(), _ordering, keyString.getTypeBits());
- return _dupKeyCheck(opCtx, key);
- }
-
- virtual bool isEmpty(OperationContext* opCtx) {
- return _data->empty();
- }
-
- class Cursor final : public SortedDataInterface::Cursor {
- public:
- Cursor(OperationContext* opCtx,
- const IndexSet& data,
- bool isForward,
- bool isUnique,
- const Ordering ordering)
- : _opCtx(opCtx),
- _data(data),
- _forward(isForward),
- _isUnique(isUnique),
- _ordering(ordering),
- _it(data.end()) {}
-
- boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
- if (_lastMoveWasRestore) {
- // Return current position rather than advancing.
- _lastMoveWasRestore = false;
- } else {
- advance();
- if (atEndPoint())
- _isEOF = true;
- }
-
- if (_isEOF)
- return {};
- return *_it;
- }
-
- boost::optional<KeyStringEntry> nextKeyString() override {
- boost::optional<IndexKeyEntry> indexKeyEntry = next(RequestedInfo::kKeyAndLoc);
- if (!indexKeyEntry) {
- return {};
- }
-
- KeyString::Builder builder(
- KeyString::Version::kLatestVersion, indexKeyEntry->key, _ordering);
- builder.appendRecordId(indexKeyEntry->loc);
- return KeyStringEntry(builder.getValueCopy(), indexKeyEntry->loc);
- }
-
- void setEndPosition(const BSONObj& key, bool inclusive) override {
- if (key.isEmpty()) {
- // This means scan to end of index.
- _endState = boost::none;
- return;
- }
-
- // NOTE: this uses the opposite min/max rules as a normal seek because a forward
- // scan should land after the key if inclusive and before if exclusive.
- _endState = EndState(BSONObj::stripFieldNames(key),
- _forward == inclusive ? RecordId::max() : RecordId::min());
- seekEndCursor();
- }
-
- boost::optional<IndexKeyEntry> _seek(const BSONObj& key, bool inclusive, RequestedInfo) {
- if (key.isEmpty()) {
- _it = inclusive ? _data.begin() : _data.end();
- _isEOF = (_it == _data.end());
- if (_isEOF) {
- return {};
- }
- } else {
- const BSONObj query = BSONObj::stripFieldNames(key);
- locate(query, _forward == inclusive ? RecordId::min() : RecordId::max());
- _lastMoveWasRestore = false;
- if (_isEOF)
- return {};
- dassert(inclusive ? compareKeys(_it->key, query) >= 0
- : compareKeys(_it->key, query) > 0);
- }
-
- return *_it;
- }
-
- boost::optional<IndexKeyEntry> seek(
- const KeyString::Value& keyString,
- RequestedInfo parts = RequestedInfo::kKeyAndLoc) override {
- const BSONObj query = KeyString::toBsonSafeWithDiscriminator(
- keyString.getBuffer(), keyString.getSize(), _ordering, keyString.getTypeBits());
- if (query.isEmpty()) {
- KeyString::Discriminator discriminator = KeyString::decodeDiscriminator(
- keyString.getBuffer(), keyString.getSize(), _ordering, keyString.getTypeBits());
- // Deduce `inclusive` based on `discriminator` and `_forward`.
- bool inclusive = (discriminator == KeyString::Discriminator::kInclusive) ||
- (_forward ^ (discriminator == KeyString::Discriminator::kExclusiveAfter));
- _it = inclusive ? _data.begin() : _data.end();
- _isEOF = (_it == _data.end());
- if (_isEOF) {
- return {};
- }
- return *_it;
- }
- locate(query, _forward ? RecordId::min() : RecordId::max());
- _lastMoveWasRestore = false;
- if (_isEOF)
- return {};
- dassert(compareKeys(_it->key, query) >= 0);
- return *_it;
- }
-
- boost::optional<KeyStringEntry> seekForKeyString(const KeyString::Value& keyStringValue) {
- auto indexKeyEntry = seek(keyStringValue);
- if (indexKeyEntry) {
- KeyString::Builder builder(KeyString::Version::V1, indexKeyEntry->key, _ordering);
- builder.appendRecordId(indexKeyEntry->loc);
- return KeyStringEntry(builder.getValueCopy(), indexKeyEntry->loc);
- } else {
- return {};
- }
- }
-
- boost::optional<IndexKeyEntry> _seekExact(const BSONObj& key, RequestedInfo parts) {
- auto kv = _seek(key, true, parts);
- if (!kv || kv->key.woCompare(key, BSONObj(), /*considerFieldNames*/ false) != 0)
- return {};
-
- if (parts & SortedDataInterface::Cursor::kWantKey) {
- return kv;
- }
- return IndexKeyEntry{{}, kv->loc};
- }
-
- boost::optional<KeyStringEntry> seekExactForKeyString(
- const KeyString::Value& keyStringValue) override {
- const BSONObj query = KeyString::toBson(keyStringValue.getBuffer(),
- keyStringValue.getSize(),
- _ordering,
- keyStringValue.getTypeBits());
- auto kv = _seekExact(query, kKeyAndLoc);
- if (kv) {
- // We have retrived a valid result from _seekExact(). Convert to KeyString
- // and return
- KeyString::Builder ks(KeyString::Version::V1, kv->key, _ordering);
- ks.appendRecordId(kv->loc);
- return KeyStringEntry(ks.getValueCopy(), kv->loc);
- }
- return {};
- }
-
- boost::optional<IndexKeyEntry> seekExact(const KeyString::Value& keyStringValue,
- RequestedInfo parts) override {
- const BSONObj query = KeyString::toBson(keyStringValue.getBuffer(),
- keyStringValue.getSize(),
- _ordering,
- keyStringValue.getTypeBits());
- return _seekExact(query, parts);
- }
-
- void save() override {
- // Keep original position if we haven't moved since the last restore.
- _opCtx = nullptr;
- if (_lastMoveWasRestore)
- return;
-
- if (_isEOF) {
- saveUnpositioned();
- return;
- }
-
- _savedAtEnd = false;
- _savedKey = _it->key.getOwned();
- _savedLoc = _it->loc;
- // Doing nothing with end cursor since it will do full reseek on restore.
- }
-
- void saveUnpositioned() override {
- _savedAtEnd = true;
- // Doing nothing with end cursor since it will do full reseek on restore.
- }
-
- void restore() override {
- // Always do a full seek on restore. We cannot use our last position since index
- // entries may have been inserted closer to our endpoint and we would need to move
- // over them.
- seekEndCursor();
-
- if (_savedAtEnd) {
- _isEOF = true;
- return;
- }
-
- // Need to find our position from the root.
- locate(_savedKey, _savedLoc);
-
- _lastMoveWasRestore = _isEOF; // We weren't EOF but now are.
- if (!_lastMoveWasRestore) {
- // For standard (non-unique) indices, restoring to either a new key or a new record
- // id means that the next key should be the one we just restored to.
- //
- // Cursors for unique indices should never return the same key twice, so we don't
- // consider the restore as having moved the cursor position if the record id
- // changes. In this case we use a null record id so that only the keys are compared.
- auto savedLocToUse = _isUnique ? RecordId() : _savedLoc;
- _lastMoveWasRestore =
- (_data.value_comp().compare(*_it, {_savedKey, savedLocToUse}) != 0);
- }
- }
-
- void detachFromOperationContext() final {
- _opCtx = nullptr;
- }
-
- void reattachToOperationContext(OperationContext* opCtx) final {
- _opCtx = opCtx;
- }
-
- private:
- bool atEndPoint() const {
- return _endState && _it == _endState->it;
- }
-
- // Advances once in the direction of the scan, updating _isEOF as needed.
- // Does nothing if already _isEOF.
- void advance() {
- if (_isEOF)
- return;
- if (_forward) {
- if (_it != _data.end())
- ++_it;
- if (_it == _data.end() || atEndPoint())
- _isEOF = true;
- } else {
- if (_it == _data.begin() || _data.empty()) {
- _isEOF = true;
- } else {
- --_it;
- }
- if (atEndPoint())
- _isEOF = true;
- }
- }
-
- bool atOrPastEndPointAfterSeeking() const {
- if (_isEOF)
- return true;
- if (!_endState)
- return false;
-
- const int cmp = _data.value_comp().compare(*_it, _endState->query);
-
- // We set up _endState->query to be in between the last in-range value and the first
- // out-of-range value. In particular, it is constructed to never equal any legal
- // index key.
- dassert(cmp != 0);
-
- if (_forward) {
- // We may have landed after the end point.
- return cmp > 0;
- } else {
- // We may have landed before the end point.
- return cmp < 0;
- }
- }
-
- void locate(const BSONObj& key, const RecordId& loc) {
- _isEOF = false;
- const auto query = IndexKeyEntry(key, loc);
- _it = _data.lower_bound(query);
- if (_forward) {
- if (_it == _data.end())
- _isEOF = true;
- } else {
- // lower_bound lands us on or after query. Reverse cursors must be on or before.
- if (_it == _data.end() || _data.value_comp().compare(*_it, query) > 0)
- advance(); // sets _isEOF if there is nothing more to return.
- }
-
- if (atOrPastEndPointAfterSeeking())
- _isEOF = true;
- }
-
- // Returns comparison relative to direction of scan. If rhs would be seen later, returns
- // a positive value.
- int compareKeys(const BSONObj& lhs, const BSONObj& rhs) const {
- int cmp = _data.value_comp().compare({lhs, RecordId()}, {rhs, RecordId()});
- return _forward ? cmp : -cmp;
- }
-
- void seekEndCursor() {
- if (!_endState || _data.empty())
- return;
-
- auto it = _data.lower_bound(_endState->query);
- if (!_forward) {
- // lower_bound lands us on or after query. Reverse cursors must be on or before.
- if (it == _data.end() || _data.value_comp().compare(*it, _endState->query) > 0) {
- if (it == _data.begin()) {
- it = _data.end(); // all existing data in range.
- } else {
- --it;
- }
- }
- }
-
- if (it != _data.end())
- dassert(compareKeys(it->key, _endState->query.key) >= 0);
- _endState->it = it;
- }
-
- OperationContext* _opCtx; // not owned
- const IndexSet& _data;
- const bool _forward;
- const bool _isUnique;
- const Ordering _ordering;
- bool _isEOF = true;
- IndexSet::const_iterator _it;
-
- struct EndState {
- EndState(BSONObj key, RecordId loc) : query(std::move(key), loc) {}
-
- IndexKeyEntry query;
- IndexSet::const_iterator it;
- };
- boost::optional<EndState> _endState;
-
- // Used by next to decide to return current position rather than moving. Should be reset
- // to false by any operation that moves the cursor, other than subsequent save/restore
- // pairs.
- bool _lastMoveWasRestore = false;
-
- // For save/restore since _it may be invalidated during a yield.
- bool _savedAtEnd = false;
- BSONObj _savedKey;
- RecordId _savedLoc;
- };
-
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
- bool isForward) const {
- return std::make_unique<Cursor>(opCtx, *_data, isForward, _isUnique, _ordering);
- }
-
- virtual Status initAsEmpty(OperationContext* opCtx) {
- // No-op
- return Status::OK();
- }
-
-private:
- class IndexChange : public RecoveryUnit::Change {
- public:
- IndexChange(IndexSet* data, const IndexKeyEntry& entry, bool insert)
- : _data(data), _entry(entry), _insert(insert) {}
-
- virtual void commit(boost::optional<Timestamp>) {}
- virtual void rollback() {
- if (_insert)
- _data->erase(_entry);
- else
- _data->insert(_entry);
- }
-
- private:
- IndexSet* _data;
- const IndexKeyEntry _entry;
- const bool _insert;
- };
-
- IndexSet* _data;
- long long _currentKeySize;
- const bool _isUnique;
-
- const NamespaceString _collectionNamespace;
- const std::string _indexName;
- const BSONObj _keyPattern;
- const BSONObj _collation;
-};
-} // namespace
-
-// IndexCatalogEntry argument taken by non-const pointer for consistency with other Btree
-// factories. We don't actually modify it.
-std::unique_ptr<SortedDataInterface> getEphemeralForTestBtreeImpl(
- const Ordering& ordering,
- bool isUnique,
- const NamespaceString& collectionNamespace,
- const std::string& indexName,
- const BSONObj& keyPattern,
- const BSONObj& collation,
- std::shared_ptr<void>* dataInOut) {
- invariant(dataInOut);
- if (!*dataInOut) {
- *dataInOut = std::make_shared<IndexSet>(IndexEntryComparison(ordering));
- }
- return std::make_unique<EphemeralForTestBtreeImpl>(static_cast<IndexSet*>(dataInOut->get()),
- ordering,
- isUnique,
- collectionNamespace,
- indexName,
- keyPattern,
- collation);
-}
-
-} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.h
deleted file mode 100644
index fe570288cec..00000000000
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-
-#include "mongo/db/storage/sorted_data_interface.h"
-
-#pragma once
-
-namespace mongo {
-
-class IndexCatalogEntry;
-
-/**
- * Caller takes ownership.
- * All permanent data will be stored and fetch from dataInOut.
- */
-std::unique_ptr<SortedDataInterface> getEphemeralForTestBtreeImpl(
- const Ordering& ordering,
- bool isUnique,
- const NamespaceString& collectionNamespace,
- const std::string& indexName,
- const BSONObj& keyPattern,
- const BSONObj& collation,
- std::shared_ptr<void>* dataInOut);
-
-} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp
deleted file mode 100644
index 32895759022..00000000000
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.h"
-
-#include <memory>
-
-#include "mongo/base/init.h"
-#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
-#include "mongo/db/storage/sorted_data_interface_test_harness.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-namespace {
-
-class EphemeralForTestBtreeImplTestHarnessHelper final
- : public virtual SortedDataInterfaceHarnessHelper {
-public:
- EphemeralForTestBtreeImplTestHarnessHelper() : _order(Ordering::make(BSONObj())) {}
-
- std::unique_ptr<SortedDataInterface> newIdIndexSortedDataInterface() final {
- BSONObj spec = BSON("key" << BSON("_id" << 1) << "name"
- << "_id_"
- << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "unique" << true);
-
- return std::unique_ptr<SortedDataInterface>(
- getEphemeralForTestBtreeImpl(_order,
- true /* unique */,
- NamespaceString("test.EphemeralForTest"),
- "indexName",
- spec,
- BSONObj{},
- &_data));
- }
-
- std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique, bool partial) final {
- return std::unique_ptr<SortedDataInterface>(
- getEphemeralForTestBtreeImpl(_order,
- unique,
- NamespaceString("test.EphemeralForTest"),
- "indexName",
- BSONObj{},
- BSONObj{},
- &_data));
- }
-
- std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
- return std::make_unique<EphemeralForTestRecoveryUnit>();
- }
-
-private:
- std::shared_ptr<void> _data; // used by EphemeralForTestBtreeImpl
- Ordering _order;
-};
-
-std::unique_ptr<SortedDataInterfaceHarnessHelper> makeEFTHarnessHelper() {
- return std::make_unique<EphemeralForTestBtreeImplTestHarnessHelper>();
-}
-
-MONGO_INITIALIZER(RegisterSortedDataInterfaceHarnessFactory)(InitializerContext* const) {
- mongo::registerSortedDataInterfaceHarnessHelperFactory(makeEFTHarnessHelper);
- return Status::OK();
-}
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
deleted file mode 100644
index dbe6a6f0b36..00000000000
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h"
-
-#include <memory>
-
-#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
-#include "mongo/db/storage/journal_listener.h"
-#include "mongo/logv2/log.h"
-
-namespace mongo {
-
-RecoveryUnit* EphemeralForTestEngine::newRecoveryUnit() {
- return new EphemeralForTestRecoveryUnit([this]() {
- stdx::lock_guard<Latch> lk(_mutex);
- if (_journalListener) {
- JournalListener::Token token = _journalListener->getToken(nullptr);
- _journalListener->onDurable(token);
- }
- });
-}
-
-Status EphemeralForTestEngine::createRecordStore(OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options) {
- // Register the ident in the `_dataMap` (for `getAllIdents`). Remainder of work done in
- // `getRecordStore`.
- stdx::lock_guard<Latch> lk(_mutex);
- _dataMap[ident] = {};
- return Status::OK();
-}
-
-std::unique_ptr<RecordStore> EphemeralForTestEngine::getRecordStore(
- OperationContext* opCtx, StringData ns, StringData ident, const CollectionOptions& options) {
- stdx::lock_guard<Latch> lk(_mutex);
- if (options.capped) {
- return std::make_unique<EphemeralForTestRecordStore>(
- ns,
- &_dataMap[ident],
- true,
- options.cappedSize ? options.cappedSize : kDefaultCappedSizeBytes,
- options.cappedMaxDocs ? options.cappedMaxDocs : -1);
- } else {
- return std::make_unique<EphemeralForTestRecordStore>(ns, &_dataMap[ident]);
- }
-}
-
-std::unique_ptr<RecordStore> EphemeralForTestEngine::makeTemporaryRecordStore(
- OperationContext* opCtx, StringData ident) {
- stdx::lock_guard<Latch> lk(_mutex);
- _dataMap[ident] = {};
- return std::make_unique<EphemeralForTestRecordStore>(ident, &_dataMap[ident]);
-}
-
-Status EphemeralForTestEngine::createSortedDataInterface(OperationContext* opCtx,
- const CollectionOptions& collOptions,
- StringData ident,
- const IndexDescriptor* desc) {
- // Register the ident in `_dataMap` (for `getAllIdents`). Remainder of work done in
- // `getSortedDataInterface`.
- stdx::lock_guard<Latch> lk(_mutex);
- _dataMap[ident] = {};
- return Status::OK();
-}
-
-std::unique_ptr<SortedDataInterface> EphemeralForTestEngine::getSortedDataInterface(
- OperationContext* opCtx, StringData ident, const IndexDescriptor* desc) {
- stdx::lock_guard<Latch> lk(_mutex);
- NamespaceString collNss;
- // Some unit tests don't have actual index entries.
- if (auto entry = desc->getEntry())
- collNss = entry->getNSSFromCatalog(opCtx);
- return getEphemeralForTestBtreeImpl(Ordering::make(desc->keyPattern()),
- desc->unique(),
- collNss,
- desc->indexName(),
- desc->keyPattern(),
- desc->collation(),
- &_dataMap[ident]);
-}
-
-Status EphemeralForTestEngine::dropIdent(OperationContext* opCtx,
- RecoveryUnit* ru,
- StringData ident) {
- stdx::lock_guard<Latch> lk(_mutex);
- _dataMap.erase(ident);
- return Status::OK();
-}
-
-int64_t EphemeralForTestEngine::getIdentSize(OperationContext* opCtx, StringData ident) {
- return 1;
-}
-
-std::vector<std::string> EphemeralForTestEngine::getAllIdents(OperationContext* opCtx) const {
- std::vector<std::string> all;
- {
- stdx::lock_guard<Latch> lk(_mutex);
- for (DataMap::const_iterator it = _dataMap.begin(); it != _dataMap.end(); ++it) {
- all.push_back(it->first);
- }
- }
- return all;
-}
-} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
deleted file mode 100644
index f505ee48655..00000000000
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <memory>
-
-#include "mongo/db/storage/journal_listener.h"
-#include "mongo/db/storage/kv/kv_engine.h"
-#include "mongo/platform/mutex.h"
-#include "mongo/util/string_map.h"
-
-namespace mongo {
-
-class JournalListener;
-
-class EphemeralForTestEngine : public KVEngine {
-public:
- virtual RecoveryUnit* newRecoveryUnit();
-
- virtual Status createRecordStore(OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options);
-
- virtual std::unique_ptr<RecordStore> getRecordStore(OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options);
-
- virtual std::unique_ptr<RecordStore> makeTemporaryRecordStore(OperationContext* opCtx,
- StringData ident) override;
-
- virtual Status createSortedDataInterface(OperationContext* opCtx,
- const CollectionOptions& collOptions,
- StringData ident,
- const IndexDescriptor* desc);
-
- virtual std::unique_ptr<SortedDataInterface> getSortedDataInterface(
- OperationContext* opCtx, StringData ident, const IndexDescriptor* desc);
-
- virtual Status beginBackup(OperationContext* opCtx) {
- return Status::OK();
- }
-
- virtual void endBackup(OperationContext* opCtx) {}
-
- virtual Status dropIdent(OperationContext* opCtx, RecoveryUnit* ru, StringData ident);
-
- virtual bool supportsDocLocking() const {
- return false;
- }
-
- virtual bool supportsDirectoryPerDB() const {
- return false;
- }
-
- /**
- * Data stored in memory is not durable.
- */
- virtual bool isDurable() const {
- return false;
- }
-
- virtual bool isEphemeral() const {
- return true;
- }
-
- virtual int64_t getIdentSize(OperationContext* opCtx, StringData ident);
-
- virtual Status repairIdent(OperationContext* opCtx, StringData ident) {
- return Status::OK();
- }
-
- virtual void cleanShutdown(){};
-
- virtual bool hasIdent(OperationContext* opCtx, StringData ident) const {
- return _dataMap.find(ident) != _dataMap.end();
- }
-
- std::vector<std::string> getAllIdents(OperationContext* opCtx) const;
-
- void setJournalListener(JournalListener* jl) final {
- stdx::unique_lock<Latch> lk(_mutex);
- _journalListener = jl;
- }
-
- virtual Timestamp getAllDurableTimestamp() const override {
- return Timestamp();
- }
-
- virtual Timestamp getOldestOpenReadTimestamp() const override {
- return Timestamp();
- }
-
- boost::optional<Timestamp> getOplogNeededForCrashRecovery() const final {
- return boost::none;
- }
-
-private:
- typedef StringMap<std::shared_ptr<void>> DataMap;
-
- mutable Mutex _mutex = MONGO_MAKE_LATCH("EphemeralForTestEngine::_mutex");
- DataMap _dataMap; // All actual data is owned in here
-
- // Notified when we write as everything is considered "journalled" since repl depends on it.
- JournalListener* _journalListener = nullptr;
-};
-} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp
deleted file mode 100644
index 7e562dac4e5..00000000000
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h"
-
-#include <memory>
-
-#include "mongo/base/init.h"
-#include "mongo/db/repl/replication_coordinator_mock.h"
-#include "mongo/db/service_context_test_fixture.h"
-#include "mongo/db/storage/kv/kv_engine_test_harness.h"
-
-namespace mongo {
-namespace {
-
-class EphemeralForTestKVHarnessHelper : public KVHarnessHelper,
- public ScopedGlobalServiceContextForTest {
-public:
- EphemeralForTestKVHarnessHelper() : _engine(new EphemeralForTestEngine()) {
- repl::ReplicationCoordinator::set(
- getGlobalServiceContext(),
- std::unique_ptr<repl::ReplicationCoordinator>(new repl::ReplicationCoordinatorMock(
- getGlobalServiceContext(), repl::ReplSettings())));
- }
-
- virtual KVEngine* restartEngine() {
- // Intentionally not restarting since the in-memory storage engine
- // does not persist data across restarts
- return _engine.get();
- }
-
- virtual KVEngine* getEngine() {
- return _engine.get();
- }
-
-private:
- std::unique_ptr<EphemeralForTestEngine> _engine;
-};
-
-std::unique_ptr<KVHarnessHelper> makeHelper() {
- return std::make_unique<EphemeralForTestKVHarnessHelper>();
-}
-
-MONGO_INITIALIZER(RegisterKVHarnessFactory)(InitializerContext*) {
- KVHarnessHelper::registerFactory(makeHelper);
- return Status::OK();
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp
index c7780eefed7..4c98f471bfc 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp
@@ -29,34 +29,50 @@
#include "mongo/platform/basic.h"
+#include "mongo/base/init.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_server_status.h"
#include "mongo/db/storage/storage_engine_impl.h"
#include "mongo/db/storage/storage_engine_init.h"
#include "mongo/db/storage/storage_options.h"
+#if __has_feature(address_sanitizer)
+#include <sanitizer/lsan_interface.h>
+#endif
+
namespace mongo {
+namespace ephemeral_for_test {
namespace {
-
-class EphemeralForTestFactory : public StorageEngine::Factory {
+class EphemeralForTestStorageEngineFactory : public StorageEngine::Factory {
public:
- virtual ~EphemeralForTestFactory() {}
virtual std::unique_ptr<StorageEngine> create(const StorageGlobalParams& params,
const StorageEngineLockFile* lockFile) const {
- uassert(ErrorCodes::InvalidOptions,
- "ephemeralForTest does not support --groupCollections",
- !params.groupCollections);
+ auto kv = std::make_unique<KVEngine>();
+ // We must only add the server parameters to the global registry once during unit testing.
+ static int setupCountForUnitTests = 0;
+ if (setupCountForUnitTests == 0) {
+ ++setupCountForUnitTests;
+
+ // Intentionally leaked.
+ MONGO_COMPILER_VARIABLE_UNUSED auto leakedSection = new ServerStatusSection(kv.get());
+
+ // This allows unit tests to run this code without encountering memory leaks
+#if __has_feature(address_sanitizer)
+ __lsan_ignore_object(leakedSection);
+#endif
+ }
StorageEngineOptions options;
options.directoryPerDB = params.directoryperdb;
options.forRepair = params.repair;
- return std::make_unique<StorageEngineImpl>(std::make_unique<EphemeralForTestEngine>(),
- options);
+ return std::make_unique<StorageEngineImpl>(std::move(kv), options);
}
virtual StringData getCanonicalName() const {
- return "ephemeralForTest";
+ return kEngineName;
}
virtual Status validateMetadata(const StorageEngineMetadata& metadata,
@@ -69,10 +85,12 @@ public:
}
};
+
ServiceContext::ConstructorActionRegisterer registerEphemeralForTest(
"RegisterEphemeralForTestEngine", [](ServiceContext* service) {
- registerStorageEngine(service, std::make_unique<EphemeralForTestFactory>());
+ registerStorageEngine(service, std::make_unique<EphemeralForTestStorageEngineFactory>());
});
} // namespace
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.cpp
index 52ff2400224..2801e7153f8 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.cpp
@@ -31,18 +31,18 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/storage/biggie/biggie_kv_engine.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h"
#include <memory>
#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/sorted_data_interface.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
KVEngine::KVEngine()
: mongo::KVEngine(), _visibilityManager(std::make_unique<VisibilityManager>()) {}
@@ -164,5 +164,5 @@ public:
void detachFromOperationContext() final {}
void reattachToOperationContext(OperationContext* opCtx) final {}
};
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h
index be6de7a0d03..e5739d388f8 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h
@@ -33,17 +33,19 @@
#include <mutex>
#include <set>
-#include "mongo/db/storage/biggie/biggie_record_store.h"
-#include "mongo/db/storage/biggie/biggie_sorted_impl.h"
-#include "mongo/db/storage/biggie/store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.h"
#include "mongo/db/storage/kv/kv_engine.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
+
+static constexpr char kEngineName[] = "ephemeralForTest";
class JournalListener;
/**
- * The biggie storage engine is intended for unit and performance testing.
+ * The ephemeral for test storage engine is intended for unit and performance testing.
*/
class KVEngine : public mongo::KVEngine {
public:
@@ -94,7 +96,7 @@ public:
}
/**
- * Biggie does not write to disk.
+ * Ephemeral for test does not write to disk.
*/
virtual bool isDurable() const {
return false;
@@ -141,7 +143,7 @@ public:
return boost::none;
}
- // Biggie Specific
+ // Ephemeral for test Specific
/**
* Returns a pair of the current version and copy of tree of the master.
@@ -172,5 +174,5 @@ private:
StringStore _master;
uint64_t _masterVersion = 0;
};
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine_test.cpp
index fb75b79f7b3..c7322bd8951 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine_test.cpp
@@ -37,15 +37,15 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context.h"
#include "mongo/db/service_context_test_fixture.h"
-#include "mongo/db/storage/biggie/biggie_kv_engine.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
-class BiggieKVHarnessHelper : public KVHarnessHelper, public ScopedGlobalServiceContextForTest {
+class KVHarnessHelper : public mongo::KVHarnessHelper, public ScopedGlobalServiceContextForTest {
public:
- BiggieKVHarnessHelper() {
+ KVHarnessHelper() {
invariant(hasGlobalServiceContext());
_engine = std::make_unique<KVEngine>();
repl::ReplicationCoordinator::set(
@@ -66,14 +66,14 @@ private:
std::unique_ptr<KVEngine> _engine;
};
-std::unique_ptr<KVHarnessHelper> makeHelper() {
- return std::make_unique<BiggieKVHarnessHelper>();
+std::unique_ptr<mongo::KVHarnessHelper> makeHelper() {
+ return std::make_unique<KVHarnessHelper>();
}
-MONGO_INITIALIZER(RegisterKVHarnessFactory)(InitializerContext*) {
+MONGO_INITIALIZER(RegisterEphemeralForTestKVHarnessFactory)(InitializerContext*) {
KVHarnessHelper::registerFactory(makeHelper);
return Status::OK();
}
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h
index 981d8a3a2c1..fb4c43eb91a 100644
--- a/src/mongo/db/storage/biggie/store.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h
@@ -43,7 +43,7 @@
#include "mongo/util/assert_util.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
class merge_conflict_exception : std::exception {
@@ -1655,5 +1655,5 @@ template <class Key, class T>
Metrics RadixStore<Key, T>::_metrics;
using StringStore = RadixStore<std::string, std::string>;
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/radix_store_concurrent_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store_concurrent_test.cpp
index 5bc05e1d370..f63379ff2a5 100644
--- a/src/mongo/db/storage/biggie/radix_store_concurrent_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store_concurrent_test.cpp
@@ -31,7 +31,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/storage/biggie/store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
#include "mongo/logv2/log.h"
#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
@@ -42,7 +42,7 @@
#include <tuple>
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
// Helper fixture to run radix tree modifications in parallel on different threads. The result will
// be merged into a master tree and any merge conflicts will be retried. The fixture remembers the
@@ -408,5 +408,5 @@ TEST_F(ConcurrentRadixStoreTestNineThreads, InsertEraseUpdateSameBranch) {
}
}
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/store_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store_test.cpp
index 43e89daafca..d0a005850bc 100644
--- a/src/mongo/db/storage/biggie/store_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store_test.cpp
@@ -31,11 +31,11 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/storage/biggie/store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
using value_type = StringStore::value_type;
@@ -2770,5 +2770,5 @@ TEST_F(RadixStoreTest, LowerBoundEndpoint) {
ASSERT_TRUE(it == thisStore.end());
}
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
index e7145216cf2..d0165a5e678 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
@@ -29,575 +29,586 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
+#include "mongo/platform/basic.h"
+
#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h"
+#include <cstring>
#include <memory>
+#include <utility>
-#include "mongo/db/jsobj.h"
-#include "mongo/db/namespace_string.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.h"
+#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/oplog_hack.h"
-#include "mongo/db/storage/recovery_unit.h"
-#include "mongo/logv2/log.h"
-#include "mongo/util/fail_point.h"
-#include "mongo/util/str.h"
-#include "mongo/util/unowned_ptr.h"
+#include "mongo/db/storage/write_unit_of_work.h"
+#include "mongo/util/hex.h"
namespace mongo {
+namespace ephemeral_for_test {
+namespace {
+Ordering allAscending = Ordering::make(BSONObj());
+auto const version = KeyString::Version::V1;
+BSONObj const sample = BSON(""
+ << "s"
+ << "" << (int64_t)0);
+
+std::string createKey(StringData ident, int64_t recordId) {
+ KeyString::Builder ks(version, BSON("" << ident << "" << recordId), allAscending);
+ return std::string(ks.getBuffer(), ks.getSize());
+}
-MONGO_FAIL_POINT_DEFINE(ephemeralForTestReturnIncorrectNumRecords);
-
-using std::shared_ptr;
-
-class EphemeralForTestRecordStore::InsertChange : public RecoveryUnit::Change {
-public:
- InsertChange(OperationContext* opCtx, Data* data, RecordId loc)
- : _opCtx(opCtx), _data(data), _loc(loc) {}
- virtual void commit(boost::optional<Timestamp>) {}
- virtual void rollback() {
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
-
- Records::iterator it = _data->records.find(_loc);
- if (it != _data->records.end()) {
- _data->dataSize -= it->second.size;
- _data->records.erase(it);
- }
+RecordId extractRecordId(const std::string& keyStr) {
+ KeyString::Builder ks(version, sample, allAscending);
+ ks.resetFromBuffer(keyStr.c_str(), keyStr.size());
+ BSONObj obj = KeyString::toBson(keyStr.c_str(), keyStr.size(), allAscending, ks.getTypeBits());
+ auto it = BSONObjIterator(obj);
+ ++it;
+ return RecordId((*it).Long());
+}
+} // namespace
+
+RecordStore::RecordStore(StringData ns,
+ StringData ident,
+ bool isCapped,
+ int64_t cappedMaxSize,
+ int64_t cappedMaxDocs,
+ CappedCallback* cappedCallback,
+ VisibilityManager* visibilityManager)
+ : mongo::RecordStore(ns),
+ _isCapped(isCapped),
+ _cappedMaxSize(cappedMaxSize),
+ _cappedMaxDocs(cappedMaxDocs),
+ _identStr(ident.rawData(), ident.size()),
+ _ident(_identStr.data(), _identStr.size()),
+ _prefix(createKey(_ident, std::numeric_limits<int64_t>::min())),
+ _postfix(createKey(_ident, std::numeric_limits<int64_t>::max())),
+ _cappedCallback(cappedCallback),
+ _isOplog(NamespaceString::oplog(ns)),
+ _visibilityManager(visibilityManager) {
+ if (_isCapped) {
+ invariant(_cappedMaxSize > 0);
+ invariant(_cappedMaxDocs == -1 || _cappedMaxDocs > 0);
+ } else {
+ invariant(_cappedMaxSize == -1);
+ invariant(_cappedMaxDocs == -1);
}
+}
-private:
- OperationContext* _opCtx;
- Data* const _data;
- const RecordId _loc;
-};
+const char* RecordStore::name() const {
+ return kEngineName;
+}
-// Works for both removes and updates
-class EphemeralForTestRecordStore::RemoveChange : public RecoveryUnit::Change {
-public:
- RemoveChange(OperationContext* opCtx,
- Data* data,
- RecordId loc,
- const EphemeralForTestRecord& rec)
- : _opCtx(opCtx), _data(data), _loc(loc), _rec(rec) {}
-
- virtual void commit(boost::optional<Timestamp>) {}
- virtual void rollback() {
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
-
- Records::iterator it = _data->records.find(_loc);
- if (it != _data->records.end()) {
- _data->dataSize -= it->second.size;
- }
+const std::string& RecordStore::getIdent() const {
+ return _identStr;
+}
- _data->dataSize += _rec.size;
- _data->records[_loc] = _rec;
- }
+long long RecordStore::dataSize(OperationContext* opCtx) const {
+ return _dataSize.load();
+}
-private:
- OperationContext* _opCtx;
- Data* const _data;
- const RecordId _loc;
- const EphemeralForTestRecord _rec;
-};
+long long RecordStore::numRecords(OperationContext* opCtx) const {
+ return static_cast<long long>(_numRecords.load());
+}
-class EphemeralForTestRecordStore::TruncateChange : public RecoveryUnit::Change {
-public:
- TruncateChange(OperationContext* opCtx, Data* data) : _opCtx(opCtx), _data(data), _dataSize(0) {
- using std::swap;
+bool RecordStore::isCapped() const {
+ return _isCapped;
+}
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
- swap(_dataSize, _data->dataSize);
- swap(_records, _data->records);
- }
+void RecordStore::setCappedCallback(CappedCallback* cb) {
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
+ _cappedCallback = cb;
+}
- virtual void commit(boost::optional<Timestamp>) {}
- virtual void rollback() {
- using std::swap;
+int64_t RecordStore::storageSize(OperationContext* opCtx,
+ BSONObjBuilder* extraInfo,
+ int infoLevel) const {
+ return dataSize(opCtx);
+}
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
- swap(_dataSize, _data->dataSize);
- swap(_records, _data->records);
+bool RecordStore::findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const {
+ StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
+ auto it = workingCopy->find(createKey(_ident, loc.repr()));
+ if (it == workingCopy->end()) {
+ return false;
}
+ *rd = RecordData(it->second.c_str(), it->second.length());
+ return true;
+}
-private:
- OperationContext* _opCtx;
- Data* const _data;
- int64_t _dataSize;
- Records _records;
-};
+void RecordStore::deleteRecord(OperationContext* opCtx, const RecordId& dl) {
+ _initHighestIdIfNeeded(opCtx);
+ auto ru = RecoveryUnit::get(opCtx);
+ StringStore* workingCopy(ru->getHead());
+ SizeAdjuster adjuster(opCtx, this);
+ invariant(workingCopy->erase(createKey(_ident, dl.repr())));
+ ru->makeDirty();
+}
-class EphemeralForTestRecordStore::Cursor final : public SeekableRecordCursor {
-public:
- Cursor(OperationContext* opCtx, const EphemeralForTestRecordStore& rs)
- : _records(rs._data->records), _isCapped(rs.isCapped()) {}
-
- boost::optional<Record> next() final {
- if (_needFirstSeek) {
- _needFirstSeek = false;
- _it = _records.begin();
- } else if (!_lastMoveWasRestore && _it != _records.end()) {
- ++_it;
+Status RecordStore::insertRecords(OperationContext* opCtx,
+ std::vector<Record>* inOutRecords,
+ const std::vector<Timestamp>& timestamps) {
+ int64_t totalSize = 0;
+ for (auto& record : *inOutRecords)
+ totalSize += record.data.size();
+
+ // Caller will retry one element at a time.
+ if (_isCapped && totalSize > _cappedMaxSize)
+ return Status(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
+
+ auto ru = RecoveryUnit::get(opCtx);
+ StringStore* workingCopy(ru->getHead());
+ {
+ SizeAdjuster adjuster(opCtx, this);
+ for (auto& record : *inOutRecords) {
+ int64_t thisRecordId = 0;
+ if (_isOplog) {
+ StatusWith<RecordId> status =
+ oploghack::extractKey(record.data.data(), record.data.size());
+ if (!status.isOK())
+ return status.getStatus();
+ thisRecordId = status.getValue().repr();
+ _visibilityManager->addUncommittedRecord(opCtx, this, RecordId(thisRecordId));
+ } else {
+ thisRecordId = _nextRecordId(opCtx);
+ }
+ workingCopy->insert(
+ StringStore::value_type{createKey(_ident, thisRecordId),
+ std::string(record.data.data(), record.data.size())});
+ record.id = RecordId(thisRecordId);
}
- _lastMoveWasRestore = false;
-
- if (_it == _records.end())
- return {};
- return {{_it->first, _it->second.toRecordData()}};
}
+ ru->makeDirty();
+ _cappedDeleteAsNeeded(opCtx, workingCopy);
+ return Status::OK();
+}
- boost::optional<Record> seekExact(const RecordId& id) final {
- _lastMoveWasRestore = false;
- _needFirstSeek = false;
- _it = _records.find(id);
- if (_it == _records.end())
- return {};
- return {{_it->first, _it->second.toRecordData()}};
+Status RecordStore::updateRecord(OperationContext* opCtx,
+ const RecordId& oldLocation,
+ const char* data,
+ int len) {
+ StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
+ SizeAdjuster adjuster(opCtx, this);
+ {
+ std::string key = createKey(_ident, oldLocation.repr());
+ StringStore::const_iterator it = workingCopy->find(key);
+ invariant(it != workingCopy->end());
+ workingCopy->update(StringStore::value_type{key, std::string(data, len)});
}
+ _cappedDeleteAsNeeded(opCtx, workingCopy);
+ RecoveryUnit::get(opCtx)->makeDirty();
- void save() final {
- if (!_needFirstSeek && !_lastMoveWasRestore)
- _savedId = _it == _records.end() ? RecordId() : _it->first;
- }
+ return Status::OK();
+}
- void saveUnpositioned() final {
- _savedId = RecordId();
- }
+bool RecordStore::updateWithDamagesSupported() const {
+ // TODO: enable updateWithDamages after writable pointers are complete.
+ return false;
+}
- bool restore() final {
- if (_savedId.isNull()) {
- _it = _records.end();
- return true;
- }
+StatusWith<RecordData> RecordStore::updateWithDamages(OperationContext* opCtx,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages) {
+ return RecordData();
+}
+
+std::unique_ptr<SeekableRecordCursor> RecordStore::getCursor(OperationContext* opCtx,
+ bool forward) const {
+ if (forward)
+ return std::make_unique<Cursor>(opCtx, *this, _visibilityManager);
+ return std::make_unique<ReverseCursor>(opCtx, *this, _visibilityManager);
+}
- _it = _records.lower_bound(_savedId);
- _lastMoveWasRestore = _it == _records.end() || _it->first != _savedId;
+Status RecordStore::truncate(OperationContext* opCtx) {
+ SizeAdjuster adjuster(opCtx, this);
+ StatusWith<int64_t> s = truncateWithoutUpdatingCount(
+ checked_cast<ephemeral_for_test::RecoveryUnit*>(opCtx->recoveryUnit()));
+ if (!s.isOK())
+ return s.getStatus();
- // Capped iterators die on invalidation rather than advancing.
- return !(_isCapped && _lastMoveWasRestore);
- }
+ return Status::OK();
+}
- void detachFromOperationContext() final {}
- void reattachToOperationContext(OperationContext* opCtx) final {}
+StatusWith<int64_t> RecordStore::truncateWithoutUpdatingCount(mongo::RecoveryUnit* ru) {
+ auto bRu = checked_cast<ephemeral_for_test::RecoveryUnit*>(ru);
+ StringStore* workingCopy(bRu->getHead());
+ StringStore::const_iterator end = workingCopy->upper_bound(_postfix);
+ std::vector<std::string> toDelete;
-private:
- Records::const_iterator _it;
- bool _needFirstSeek = true;
- bool _lastMoveWasRestore = false;
- RecordId _savedId; // Location to restore() to. Null means EOF.
+ for (auto it = workingCopy->lower_bound(_prefix); it != end; ++it) {
+ toDelete.push_back(it->first);
+ }
- const EphemeralForTestRecordStore::Records& _records;
- const bool _isCapped;
-};
+ if (toDelete.empty())
+ return 0;
-class EphemeralForTestRecordStore::ReverseCursor final : public SeekableRecordCursor {
-public:
- ReverseCursor(OperationContext* opCtx, const EphemeralForTestRecordStore& rs)
- : _records(rs._data->records), _isCapped(rs.isCapped()) {}
-
- boost::optional<Record> next() final {
- if (_needFirstSeek) {
- _needFirstSeek = false;
- _it = _records.rbegin();
- } else if (!_lastMoveWasRestore && _it != _records.rend()) {
- ++_it;
- }
- _lastMoveWasRestore = false;
+ for (const auto& key : toDelete)
+ workingCopy->erase(key);
- if (_it == _records.rend())
- return {};
- return {{_it->first, _it->second.toRecordData()}};
- }
+ bRu->makeDirty();
- boost::optional<Record> seekExact(const RecordId& id) final {
- _lastMoveWasRestore = false;
- _needFirstSeek = false;
+ return static_cast<int64_t>(toDelete.size());
+}
- auto forwardIt = _records.find(id);
- if (forwardIt == _records.end()) {
- _it = _records.rend();
- return {};
+void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
+ auto ru = RecoveryUnit::get(opCtx);
+ StringStore* workingCopy(ru->getHead());
+ WriteUnitOfWork wuow(opCtx);
+ const auto recordKey = createKey(_ident, end.repr());
+ auto recordIt =
+ inclusive ? workingCopy->lower_bound(recordKey) : workingCopy->upper_bound(recordKey);
+ auto endIt = workingCopy->upper_bound(_postfix);
+
+ while (recordIt != endIt) {
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
+ if (_cappedCallback) {
+ // Documents are guaranteed to have a RecordId at the end of the KeyString, unlike
+ // unique indexes.
+ RecordId rid = extractRecordId(recordIt->first);
+ RecordData rd = RecordData(recordIt->second.c_str(), recordIt->second.length());
+ uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, rid, rd));
}
+ // Important to scope adjuster until after capped callback, as that changes indexes and
+ // would result in those changes being reflected in RecordStore count/size.
+ SizeAdjuster adjuster(opCtx, this);
- // The reverse_iterator will point to the preceding element, so increment the base
- // iterator to make it point past the found element.
- ++forwardIt;
- _it = Records::const_reverse_iterator(forwardIt);
- dassert(_it != _records.rend());
- dassert(_it->first == id);
- return {{_it->first, _it->second.toRecordData()}};
- }
+ // Don't need to increment the iterator because the iterator gets revalidated and placed on
+ // the next item after the erase.
+ workingCopy->erase(recordIt->first);
- void save() final {
- if (!_needFirstSeek && !_lastMoveWasRestore)
- _savedId = _it == _records.rend() ? RecordId() : _it->first;
+ // Tree modifications are bound to happen here so we need to reposition our end cursor.
+ endIt.repositionIfChanged();
+ ru->makeDirty();
}
- void saveUnpositioned() final {
- _savedId = RecordId();
+ wuow.commit();
+}
+
+void RecordStore::appendCustomStats(OperationContext* opCtx,
+ BSONObjBuilder* result,
+ double scale) const {
+ result->appendBool("capped", _isCapped);
+ if (_isCapped) {
+ result->appendIntOrLL("max", _cappedMaxDocs);
+ result->appendIntOrLL("maxSize", _cappedMaxSize / scale);
}
+}
- bool restore() final {
- if (_savedId.isNull()) {
- _it = _records.rend();
- return true;
- }
+void RecordStore::updateStatsAfterRepair(OperationContext* opCtx,
+ long long numRecords,
+ long long dataSize) {
+ // SERVER-38883 This storage engine should instead be able to invariant that stats are correct.
+ _numRecords.store(numRecords);
+ _dataSize.store(dataSize);
+}
- // Note: upper_bound returns the first entry > _savedId and reverse_iterators
- // dereference to the element before their base iterator. This combine to make this
- // dereference to the first element <= _savedId which is what we want here.
- _it = Records::const_reverse_iterator(_records.upper_bound(_savedId));
- _lastMoveWasRestore = _it == _records.rend() || _it->first != _savedId;
+void RecordStore::waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const {
+ _visibilityManager->waitForAllEarlierOplogWritesToBeVisible(opCtx);
+}
- // Capped iterators die on invalidation rather than advancing.
- return !(_isCapped && _lastMoveWasRestore);
- }
+boost::optional<RecordId> RecordStore::oplogStartHack(OperationContext* opCtx,
+ const RecordId& startingPosition) const {
+ if (!_isOplog)
+ return boost::none;
- void detachFromOperationContext() final {}
- void reattachToOperationContext(OperationContext* opCtx) final {}
+ if (numRecords(opCtx) == 0)
+ return RecordId();
-private:
- Records::const_reverse_iterator _it;
- bool _needFirstSeek = true;
- bool _lastMoveWasRestore = false;
- RecordId _savedId; // Location to restore() to. Null means EOF.
- const EphemeralForTestRecordStore::Records& _records;
- const bool _isCapped;
-};
+ StringStore* workingCopy{RecoveryUnit::get(opCtx)->getHead()};
+ std::string key = createKey(_ident, startingPosition.repr());
+ StringStore::const_reverse_iterator it(workingCopy->upper_bound(key));
-//
-// RecordStore
-//
+ if (it == workingCopy->rend())
+ return RecordId();
-EphemeralForTestRecordStore::EphemeralForTestRecordStore(StringData ns,
- std::shared_ptr<void>* dataInOut,
- bool isCapped,
- int64_t cappedMaxSize,
- int64_t cappedMaxDocs,
- CappedCallback* cappedCallback)
- : RecordStore(ns),
- _isCapped(isCapped),
- _cappedMaxSize(cappedMaxSize),
- _cappedMaxDocs(cappedMaxDocs),
- _cappedCallback(cappedCallback),
- _data(*dataInOut ? static_cast<Data*>(dataInOut->get())
- : new Data(ns, NamespaceString::oplog(ns))) {
- if (!*dataInOut) {
- dataInOut->reset(_data); // takes ownership
- }
+ RecordId rid = RecordId(extractRecordId(it->first));
+ if (rid > startingPosition)
+ return RecordId();
- if (_isCapped) {
- invariant(_cappedMaxSize > 0);
- invariant(_cappedMaxDocs == -1 || _cappedMaxDocs > 0);
- } else {
- invariant(_cappedMaxSize == -1);
- invariant(_cappedMaxDocs == -1);
- }
+ return rid;
}
-const char* EphemeralForTestRecordStore::name() const {
- return "EphemeralForTest";
-}
+Status RecordStore::oplogDiskLocRegister(OperationContext* opCtx,
+ const Timestamp& opTime,
+ bool orderedCommit) {
+ if (!orderedCommit) {
+ return opCtx->recoveryUnit()->setTimestamp(opTime);
+ }
-RecordData EphemeralForTestRecordStore::dataFor(OperationContext* opCtx,
- const RecordId& loc) const {
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
- return recordFor(lock, loc)->toRecordData();
+ return Status::OK();
}
-const EphemeralForTestRecordStore::EphemeralForTestRecord* EphemeralForTestRecordStore::recordFor(
- WithLock, const RecordId& loc) const {
- Records::const_iterator it = _data->records.find(loc);
- if (it == _data->records.end()) {
- LOGV2_ERROR(23720,
- "EphemeralForTestRecordStore::recordFor cannot find record for {ns}:{loc}",
- "ns"_attr = ns(),
- "loc"_attr = loc);
- }
- invariant(it != _data->records.end());
- return &it->second;
-}
-
-EphemeralForTestRecordStore::EphemeralForTestRecord* EphemeralForTestRecordStore::recordFor(
- WithLock, const RecordId& loc) {
- Records::iterator it = _data->records.find(loc);
- if (it == _data->records.end()) {
- LOGV2_ERROR(23721,
- "EphemeralForTestRecordStore::recordFor cannot find record for {ns}:{loc}",
- "ns"_attr = ns(),
- "loc"_attr = loc);
+void RecordStore::_initHighestIdIfNeeded(OperationContext* opCtx) {
+ // In the normal case, this will already be initialized, so use a weak load. Since this value
+ // will only change from 0 to a positive integer, the only risk is reading an outdated value, 0,
+ // and having to take the mutex.
+ if (_highestRecordId.loadRelaxed() > 0) {
+ return;
}
- invariant(it != _data->records.end());
- return &it->second;
-}
-bool EphemeralForTestRecordStore::findRecord(OperationContext* opCtx,
- const RecordId& loc,
- RecordData* rd) const {
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
- Records::const_iterator it = _data->records.find(loc);
- if (it == _data->records.end()) {
- return false;
+ // Only one thread needs to do this.
+ stdx::lock_guard<Latch> lk(_initHighestIdMutex);
+ if (_highestRecordId.load() > 0) {
+ return;
}
- *rd = it->second.toRecordData();
- return true;
-}
-void EphemeralForTestRecordStore::deleteRecord(OperationContext* opCtx, const RecordId& loc) {
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
+ // Need to start at 1 so we are always higher than RecordId::min()
+ int64_t nextId = 1;
- deleteRecord(lock, opCtx, loc);
-}
+ // Find the largest RecordId currently in use.
+ std::unique_ptr<SeekableRecordCursor> cursor = getCursor(opCtx, /*forward=*/false);
+ if (auto record = cursor->next()) {
+ nextId = record->id.repr() + 1;
+ }
+
+ _highestRecordId.store(nextId);
+};
-void EphemeralForTestRecordStore::deleteRecord(WithLock lk,
- OperationContext* opCtx,
- const RecordId& loc) {
- EphemeralForTestRecord* rec = recordFor(lk, loc);
- opCtx->recoveryUnit()->registerChange(std::make_unique<RemoveChange>(opCtx, _data, loc, *rec));
- _data->dataSize -= rec->size;
- invariant(_data->records.erase(loc) == 1);
+int64_t RecordStore::_nextRecordId(OperationContext* opCtx) {
+ _initHighestIdIfNeeded(opCtx);
+ return _highestRecordId.fetchAndAdd(1);
}
-bool EphemeralForTestRecordStore::cappedAndNeedDelete(WithLock, OperationContext* opCtx) const {
+bool RecordStore::_cappedAndNeedDelete(OperationContext* opCtx, StringStore* workingCopy) {
if (!_isCapped)
return false;
- if (_data->dataSize > _cappedMaxSize)
+ if (dataSize(opCtx) > _cappedMaxSize)
return true;
- if ((_cappedMaxDocs != -1) && (numRecords(opCtx) > _cappedMaxDocs))
+ if ((_cappedMaxDocs != -1) && numRecords(opCtx) > _cappedMaxDocs)
return true;
-
return false;
}
-void EphemeralForTestRecordStore::cappedDeleteAsNeeded(WithLock lk, OperationContext* opCtx) {
- while (cappedAndNeedDelete(lk, opCtx)) {
- invariant(!_data->records.empty());
+void RecordStore::_cappedDeleteAsNeeded(OperationContext* opCtx, StringStore* workingCopy) {
+ if (!_isCapped)
+ return;
- Records::iterator oldest = _data->records.begin();
- RecordId id = oldest->first;
- RecordData data = oldest->second.toRecordData();
+ // Create the lowest key for this identifier and use lower_bound() to get to the first one.
+ auto recordIt = workingCopy->lower_bound(_prefix);
- if (_cappedCallback)
- uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, id, data));
+ // Ensure only one thread at a time can do deletes, otherwise they'll conflict.
+ stdx::lock_guard<Latch> cappedDeleterLock(_cappedDeleterMutex);
- deleteRecord(lk, opCtx, id);
- }
-}
+ while (_cappedAndNeedDelete(opCtx, workingCopy)) {
-StatusWith<RecordId> EphemeralForTestRecordStore::extractAndCheckLocForOplog(WithLock,
- const char* data,
- int len) const {
- StatusWith<RecordId> status = oploghack::extractKey(data, len);
- if (!status.isOK())
- return status;
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
+ RecordId rid = RecordId(extractRecordId(recordIt->first));
- if (!_data->records.empty() && status.getValue() <= _data->records.rbegin()->first) {
+ if (_isOplog && _visibilityManager->isFirstHidden(rid)) {
+ // We have a record that hasn't been committed yet, so we shouldn't truncate anymore
+ // until it gets committed.
+ return;
+ }
- return StatusWith<RecordId>(ErrorCodes::BadValue,
- str::stream() << "attempted out-of-order oplog insert of "
- << status.getValue() << " (oplog last insert was "
- << _data->records.rbegin()->first << " )");
+ if (_cappedCallback) {
+ RecordData rd = RecordData(recordIt->second.c_str(), recordIt->second.length());
+ uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, rid, rd));
+ }
+
+ SizeAdjuster adjuster(opCtx, this);
+ invariant(numRecords(opCtx) > 0, str::stream() << numRecords(opCtx));
+
+ // Don't need to increment the iterator because the iterator gets revalidated and placed on
+ // the next item after the erase.
+ workingCopy->erase(recordIt->first);
+ auto ru = RecoveryUnit::get(opCtx);
+ ru->makeDirty();
}
- return status;
}
-Status EphemeralForTestRecordStore::insertRecords(OperationContext* opCtx,
- std::vector<Record>* inOutRecords,
- const std::vector<Timestamp>& timestamps) {
+RecordStore::Cursor::Cursor(OperationContext* opCtx,
+ const RecordStore& rs,
+ VisibilityManager* visibilityManager)
+ : opCtx(opCtx), _rs(rs), _visibilityManager(visibilityManager) {
+ if (_rs._isOplog) {
+ _oplogVisibility = _visibilityManager->getAllCommittedRecord();
+ }
+}
- for (auto& record : *inOutRecords) {
- if (_isCapped && record.data.size() > _cappedMaxSize) {
- // We use dataSize for capped rollover and we don't want to delete everything if we know
- // this won't fit.
- return Status(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
- }
+boost::optional<Record> RecordStore::Cursor::next() {
+ _savedPosition = boost::none;
+ StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
+ if (_needFirstSeek) {
+ _needFirstSeek = false;
+ it = workingCopy->lower_bound(_rs._prefix);
+ } else if (it != workingCopy->end() && !_lastMoveWasRestore) {
+ ++it;
}
- const auto insertSingleFn = [this, opCtx](Record* record) {
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
- EphemeralForTestRecord rec(record->data.size());
- memcpy(rec.data.get(), record->data.data(), record->data.size());
-
- RecordId loc;
- if (_data->isOplog) {
- StatusWith<RecordId> status =
- extractAndCheckLocForOplog(lock, record->data.data(), record->data.size());
- if (!status.isOK())
- return status.getStatus();
- loc = status.getValue();
- } else {
- loc = allocateLoc(lock);
+ _lastMoveWasRestore = false;
+ if (it != workingCopy->end() && inPrefix(it->first)) {
+ _savedPosition = it->first;
+ Record nextRecord;
+ nextRecord.id = RecordId(extractRecordId(it->first));
+ nextRecord.data = RecordData(it->second.c_str(), it->second.length());
+
+ if (_rs._isOplog && nextRecord.id > _oplogVisibility) {
+ return boost::none;
}
- _data->dataSize += record->data.size();
- _data->records[loc] = rec;
- record->id = loc;
+ return nextRecord;
+ }
+ return boost::none;
+}
- opCtx->recoveryUnit()->registerChange(std::make_unique<InsertChange>(opCtx, _data, loc));
- cappedDeleteAsNeeded(lock, opCtx);
+boost::optional<Record> RecordStore::Cursor::seekExact(const RecordId& id) {
+ _savedPosition = boost::none;
+ _lastMoveWasRestore = false;
+ StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
+ std::string key = createKey(_rs._ident, id.repr());
+ it = workingCopy->find(key);
- return Status::OK();
- };
+ if (it == workingCopy->end() || !inPrefix(it->first))
+ return boost::none;
- for (auto& record : *inOutRecords) {
- auto status = insertSingleFn(&record);
- if (!status.isOK())
- return status;
+ if (_rs._isOplog && id > _oplogVisibility) {
+ return boost::none;
}
- return Status::OK();
+ _needFirstSeek = false;
+ _savedPosition = it->first;
+ return Record{id, RecordData(it->second.c_str(), it->second.length())};
}
-Status EphemeralForTestRecordStore::updateRecord(OperationContext* opCtx,
- const RecordId& loc,
- const char* data,
- int len) {
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
- EphemeralForTestRecord* oldRecord = recordFor(lock, loc);
- int oldLen = oldRecord->size;
+// Positions are saved as we go.
+void RecordStore::Cursor::save() {}
+void RecordStore::Cursor::saveUnpositioned() {}
- // Documents in capped collections cannot change size. We check that above the storage layer.
- invariant(!_isCapped || len == oldLen);
+bool RecordStore::Cursor::restore() {
+ if (!_savedPosition)
+ return true;
- EphemeralForTestRecord newRecord(len);
- memcpy(newRecord.data.get(), data, len);
+ // Get oplog visibility before forking working tree to guarantee that nothing gets committed
+ // after we've forked that would update oplog visibility
+ if (_rs._isOplog) {
+ _oplogVisibility = _visibilityManager->getAllCommittedRecord();
+ }
- opCtx->recoveryUnit()->registerChange(
- std::make_unique<RemoveChange>(opCtx, _data, loc, *oldRecord));
- _data->dataSize += len - oldLen;
- *oldRecord = newRecord;
+ StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
+ it = workingCopy->lower_bound(_savedPosition.value());
+ _lastMoveWasRestore = it == workingCopy->end() || it->first != _savedPosition.value();
- cappedDeleteAsNeeded(lock, opCtx);
- return Status::OK();
+ // Capped iterators die on invalidation rather than advancing.
+ return !(_rs._isCapped && _lastMoveWasRestore);
}
-bool EphemeralForTestRecordStore::updateWithDamagesSupported() const {
- return true;
+void RecordStore::Cursor::detachFromOperationContext() {
+ invariant(opCtx != nullptr);
+ opCtx = nullptr;
}
-StatusWith<RecordData> EphemeralForTestRecordStore::updateWithDamages(
- OperationContext* opCtx,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages) {
-
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
-
- EphemeralForTestRecord* oldRecord = recordFor(lock, loc);
- const int len = oldRecord->size;
-
- EphemeralForTestRecord newRecord(len);
- memcpy(newRecord.data.get(), oldRecord->data.get(), len);
+void RecordStore::Cursor::reattachToOperationContext(OperationContext* opCtx) {
+ invariant(opCtx != nullptr);
+ this->opCtx = opCtx;
+}
- opCtx->recoveryUnit()->registerChange(
- std::make_unique<RemoveChange>(opCtx, _data, loc, *oldRecord));
- *oldRecord = newRecord;
+bool RecordStore::Cursor::inPrefix(const std::string& key_string) {
+ return (key_string > _rs._prefix) && (key_string < _rs._postfix);
+}
- cappedDeleteAsNeeded(lock, opCtx);
+RecordStore::ReverseCursor::ReverseCursor(OperationContext* opCtx,
+ const RecordStore& rs,
+ VisibilityManager* visibilityManager)
+ : opCtx(opCtx), _rs(rs), _visibilityManager(visibilityManager) {
+ _savedPosition = boost::none;
+}
- char* root = newRecord.data.get();
- mutablebson::DamageVector::const_iterator where = damages.begin();
- const mutablebson::DamageVector::const_iterator end = damages.end();
- for (; where != end; ++where) {
- const char* sourcePtr = damageSource + where->sourceOffset;
- char* targetPtr = root + where->targetOffset;
- std::memcpy(targetPtr, sourcePtr, where->size);
+boost::optional<Record> RecordStore::ReverseCursor::next() {
+ _savedPosition = boost::none;
+ StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
+ if (_needFirstSeek) {
+ _needFirstSeek = false;
+ it = StringStore::const_reverse_iterator(workingCopy->upper_bound(_rs._postfix));
+ } else if (it != workingCopy->rend() && !_lastMoveWasRestore) {
+ ++it;
}
+ _lastMoveWasRestore = false;
- *oldRecord = newRecord;
+ if (it != workingCopy->rend() && inPrefix(it->first)) {
+ _savedPosition = it->first;
+ Record nextRecord;
+ nextRecord.id = RecordId(extractRecordId(it->first));
+ nextRecord.data = RecordData(it->second.c_str(), it->second.length());
- return newRecord.toRecordData();
+ return nextRecord;
+ }
+ return boost::none;
}
-std::unique_ptr<SeekableRecordCursor> EphemeralForTestRecordStore::getCursor(
- OperationContext* opCtx, bool forward) const {
- if (forward)
- return std::make_unique<Cursor>(opCtx, *this);
- return std::make_unique<ReverseCursor>(opCtx, *this);
-}
+boost::optional<Record> RecordStore::ReverseCursor::seekExact(const RecordId& id) {
+ _needFirstSeek = false;
+ _savedPosition = boost::none;
+ StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
+ std::string key = createKey(_rs._ident, id.repr());
+ StringStore::const_iterator canFind = workingCopy->find(key);
+ if (canFind == workingCopy->end() || !inPrefix(canFind->first)) {
+ it = workingCopy->rend();
+ return boost::none;
+ }
-Status EphemeralForTestRecordStore::truncate(OperationContext* opCtx) {
- // Unlike other changes, TruncateChange mutates _data on construction to perform the
- // truncate
- opCtx->recoveryUnit()->registerChange(std::make_unique<TruncateChange>(opCtx, _data));
- return Status::OK();
+ it = StringStore::const_reverse_iterator(++canFind); // reverse iterator returns item 1 before
+ _savedPosition = it->first;
+ return Record{id, RecordData(it->second.c_str(), it->second.length())};
}
-void EphemeralForTestRecordStore::cappedTruncateAfter(OperationContext* opCtx,
- RecordId end,
- bool inclusive) {
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
- Records::iterator it =
- inclusive ? _data->records.lower_bound(end) : _data->records.upper_bound(end);
- while (it != _data->records.end()) {
- RecordId id = it->first;
- EphemeralForTestRecord record = it->second;
+void RecordStore::ReverseCursor::save() {}
+void RecordStore::ReverseCursor::saveUnpositioned() {}
- if (_cappedCallback) {
- uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, id, record.toRecordData()));
- }
+bool RecordStore::ReverseCursor::restore() {
+ if (!_savedPosition)
+ return true;
- opCtx->recoveryUnit()->registerChange(
- std::make_unique<RemoveChange>(opCtx, _data, id, record));
- _data->dataSize -= record.size;
- _data->records.erase(it++);
- }
-}
+ StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
+ it = StringStore::const_reverse_iterator(workingCopy->upper_bound(_savedPosition.value()));
+ _lastMoveWasRestore = (it == workingCopy->rend() || it->first != _savedPosition.value());
-void EphemeralForTestRecordStore::appendCustomStats(OperationContext* opCtx,
- BSONObjBuilder* result,
- double scale) const {
- result->appendBool("capped", _isCapped);
- if (_isCapped) {
- result->appendIntOrLL("max", _cappedMaxDocs);
- result->appendIntOrLL("maxSize", _cappedMaxSize / scale);
- }
+ // Capped iterators die on invalidation rather than advancing.
+ return !(_rs._isCapped && _lastMoveWasRestore);
}
-int64_t EphemeralForTestRecordStore::storageSize(OperationContext* opCtx,
- BSONObjBuilder* extraInfo,
- int infoLevel) const {
- // Note: not making use of extraInfo or infoLevel since we don't have extents
- const int64_t recordOverhead = numRecords(opCtx) * sizeof(EphemeralForTestRecord);
- return _data->dataSize + recordOverhead;
+void RecordStore::ReverseCursor::detachFromOperationContext() {
+ invariant(opCtx != nullptr);
+ opCtx = nullptr;
}
-long long EphemeralForTestRecordStore::numRecords(OperationContext* opCtx) const {
- if (MONGO_unlikely(ephemeralForTestReturnIncorrectNumRecords.shouldFail())) {
- return _data->records.size() + 1;
- }
- return _data->records.size();
+void RecordStore::ReverseCursor::reattachToOperationContext(OperationContext* opCtx) {
+ invariant(opCtx != nullptr);
+ this->opCtx = opCtx;
}
-RecordId EphemeralForTestRecordStore::allocateLoc(WithLock) {
- RecordId out = RecordId(_data->nextId++);
- invariant(out.isNormal());
- return out;
+bool RecordStore::ReverseCursor::inPrefix(const std::string& key_string) {
+ return (key_string > _rs._prefix) && (key_string < _rs._postfix);
}
-boost::optional<RecordId> EphemeralForTestRecordStore::oplogStartHack(
- OperationContext* opCtx, const RecordId& startingPosition) const {
- if (!_data->isOplog)
- return boost::none;
-
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
- const Records& records = _data->records;
-
- if (records.empty())
- return RecordId();
-
- Records::const_iterator it = records.lower_bound(startingPosition);
- if (it == records.end() || it->first > startingPosition) {
- // If the startingPosition is before the oldest oplog entry, this ensures that we return
- // RecordId() as specified in record_store.h.
- if (it == records.begin()) {
- return RecordId();
- }
- --it;
- }
-
- return it->first;
+RecordStore::SizeAdjuster::SizeAdjuster(OperationContext* opCtx, RecordStore* rs)
+ : _opCtx(opCtx),
+ _rs(rs),
+ _workingCopy(ephemeral_for_test::RecoveryUnit::get(opCtx)->getHead()),
+ _origNumRecords(_workingCopy->size()),
+ _origDataSize(_workingCopy->dataSize()) {}
+
+RecordStore::SizeAdjuster::~SizeAdjuster() {
+ // SERVER-48981 This implementation of fastcount results in inaccurate values. This storage
+ // engine emits write conflict exceptions at commit-time leading to the fastcount to be
+ // inaccurate until the rollback happens.
+    // If proper local isolation is implemented, SERVER-38883 can also be fulfilled for this storage
+ // engine where we can invariant for correct fastcount in updateStatsAfterRepair()
+ int64_t deltaNumRecords = _workingCopy->size() - _origNumRecords;
+ int64_t deltaDataSize = _workingCopy->dataSize() - _origDataSize;
+ _rs->_numRecords.fetchAndAdd(deltaNumRecords);
+ _rs->_dataSize.fetchAndAdd(deltaDataSize);
+ RecoveryUnit::get(_opCtx)->onRollback([rs = _rs, deltaNumRecords, deltaDataSize]() {
+ rs->_numRecords.fetchAndSubtract(deltaNumRecords);
+ rs->_dataSize.fetchAndSubtract(deltaDataSize);
+ });
}
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
index c37b69ba3e4..30b319dbc41 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
@@ -29,39 +29,43 @@
#pragma once
-#include <boost/shared_array.hpp>
+#include <atomic>
#include <map>
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/capped_callback.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.h"
#include "mongo/db/storage/record_store.h"
+#include "mongo/platform/atomic_word.h"
#include "mongo/platform/mutex.h"
-#include "mongo/util/concurrency/with_lock.h"
-
namespace mongo {
+namespace ephemeral_for_test {
/**
* A RecordStore that stores all data in-memory.
- *
- * @param cappedMaxSize - required if isCapped. limit uses dataSize() in this impl.
*/
-class EphemeralForTestRecordStore : public RecordStore {
+class RecordStore final : public ::mongo::RecordStore {
public:
- explicit EphemeralForTestRecordStore(StringData ns,
- std::shared_ptr<void>* dataInOut,
- bool isCapped = false,
- int64_t cappedMaxSize = -1,
- int64_t cappedMaxDocs = -1,
- CappedCallback* cappedCallback = nullptr);
+ explicit RecordStore(StringData ns,
+ StringData ident,
+ bool isCapped = false,
+ int64_t cappedMaxSize = -1,
+ int64_t cappedMaxDocs = -1,
+ CappedCallback* cappedCallback = nullptr,
+ VisibilityManager* visibilityManager = nullptr);
+ ~RecordStore() = default;
virtual const char* name() const;
-
- const std::string& getIdent() const override {
- return ns();
- }
-
- virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const;
+ virtual const std::string& getIdent() const;
+ virtual long long dataSize(OperationContext* opCtx) const;
+ virtual long long numRecords(OperationContext* opCtx) const;
+ virtual bool isCapped() const;
+ virtual void setCappedCallback(CappedCallback*);
+ virtual int64_t storageSize(OperationContext* opCtx,
+ BSONObjBuilder* extraInfo = nullptr,
+ int infoLevel = 0) const;
virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const;
@@ -84,10 +88,15 @@ public:
const char* damageSource,
const mutablebson::DamageVector& damages);
+ Status oplogDiskLocRegister(OperationContext* opCtx,
+ const Timestamp& opTime,
+ bool orderedCommit) override;
+
std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final;
virtual Status truncate(OperationContext* opCtx);
+ StatusWith<int64_t> truncateWithoutUpdatingCount(RecoveryUnit* ru);
virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
@@ -95,93 +104,127 @@ public:
BSONObjBuilder* result,
double scale) const;
- virtual int64_t storageSize(OperationContext* opCtx,
- BSONObjBuilder* extraInfo = nullptr,
- int infoLevel = 0) const;
-
- virtual long long dataSize(OperationContext* opCtx) const {
- return _data->dataSize;
- }
-
- virtual long long numRecords(OperationContext* opCtx) const;
-
virtual boost::optional<RecordId> oplogStartHack(OperationContext* opCtx,
const RecordId& startingPosition) const;
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {}
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override;
virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
- long long dataSize) {
- stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
- invariant(_data->records.size() == size_t(numRecords));
- _data->dataSize = dataSize;
- }
-
-protected:
- struct EphemeralForTestRecord {
- EphemeralForTestRecord() : size(0) {}
- EphemeralForTestRecord(int size) : size(size), data(new char[size]) {}
-
- RecordData toRecordData() const {
- return RecordData(data.get(), size);
- }
-
- int size;
- boost::shared_array<char> data;
- };
-
- virtual const EphemeralForTestRecord* recordFor(WithLock, const RecordId& loc) const;
- virtual EphemeralForTestRecord* recordFor(WithLock, const RecordId& loc);
-
-public:
- //
- // Not in RecordStore interface
- //
-
- typedef std::map<RecordId, EphemeralForTestRecord> Records;
-
- bool isCapped() const {
- return _isCapped;
- }
- void setCappedCallback(CappedCallback* cb) {
- _cappedCallback = cb;
- }
+ long long dataSize);
private:
- class InsertChange;
- class RemoveChange;
- class TruncateChange;
+ friend class VisibilityManagerChange;
- class Cursor;
- class ReverseCursor;
+ void _initHighestIdIfNeeded(OperationContext* opCtx);
- StatusWith<RecordId> extractAndCheckLocForOplog(WithLock, const char* data, int len) const;
+ /**
+ * This gets the next (guaranteed) unique record id.
+ */
+ int64_t _nextRecordId(OperationContext* opCtx);
- RecordId allocateLoc(WithLock);
- bool cappedAndNeedDelete(WithLock, OperationContext* opCtx) const;
- void cappedDeleteAsNeeded(WithLock lk, OperationContext* opCtx);
- void deleteRecord(WithLock lk, OperationContext* opCtx, const RecordId& dl);
+ /**
+ * Two helper functions for deleting excess records in capped record stores.
+ * The caller should not have an active SizeAdjuster.
+ */
+ bool _cappedAndNeedDelete(OperationContext* opCtx, StringStore* workingCopy);
+ void _cappedDeleteAsNeeded(OperationContext* opCtx, StringStore* workingCopy);
- // TODO figure out a proper solution to metadata
const bool _isCapped;
const int64_t _cappedMaxSize;
const int64_t _cappedMaxDocs;
+
+ std::string _identStr;
+ StringData _ident;
+
+ std::string _prefix;
+ std::string _postfix;
+
+ mutable Mutex _cappedCallbackMutex =
+ MONGO_MAKE_LATCH("RecordStore::_cappedCallbackMutex"); // Guards _cappedCallback
CappedCallback* _cappedCallback;
- // This is the "persistent" data.
- struct Data {
- Data(StringData ns, bool isOplog)
- : dataSize(0), recordsMutex(), nextId(1), isOplog(isOplog) {}
+ mutable Mutex _cappedDeleterMutex = MONGO_MAKE_LATCH("RecordStore::_cappedDeleterMutex");
+
+ mutable Mutex _initHighestIdMutex = MONGO_MAKE_LATCH("RecordStore::_initHighestIdMutex");
+ AtomicWord<long long> _highestRecordId{0};
+ AtomicWord<long long> _numRecords{0};
+ AtomicWord<long long> _dataSize{0};
+
+ std::string generateKey(const uint8_t* key, size_t key_len) const;
+
+ bool _isOplog;
+ VisibilityManager* _visibilityManager;
+
+ /**
+ * Automatically adjust the record count and data size based on the size in change of the
+ * underlying radix store during the life time of the SizeAdjuster.
+ */
+ friend class SizeAdjuster;
+ class SizeAdjuster {
+ public:
+ SizeAdjuster(OperationContext* opCtx, RecordStore* rs);
+ ~SizeAdjuster();
+
+ private:
+ OperationContext* const _opCtx;
+ RecordStore* const _rs;
+ const StringStore* _workingCopy;
+ const int64_t _origNumRecords;
+ const int64_t _origDataSize;
+ };
- int64_t dataSize;
- stdx::recursive_mutex recordsMutex;
- Records records;
- int64_t nextId;
- const bool isOplog;
+ class Cursor final : public SeekableRecordCursor {
+ OperationContext* opCtx;
+ const RecordStore& _rs;
+ StringStore::const_iterator it;
+ boost::optional<std::string> _savedPosition;
+ bool _needFirstSeek = true;
+ bool _lastMoveWasRestore = false;
+ VisibilityManager* _visibilityManager;
+ RecordId _oplogVisibility;
+
+ public:
+ Cursor(OperationContext* opCtx,
+ const RecordStore& rs,
+ VisibilityManager* visibilityManager);
+ boost::optional<Record> next() final;
+ boost::optional<Record> seekExact(const RecordId& id) final override;
+ void save() final;
+ void saveUnpositioned() final override;
+ bool restore() final;
+ void detachFromOperationContext() final;
+ void reattachToOperationContext(OperationContext* opCtx) final;
+
+ private:
+ bool inPrefix(const std::string& key_string);
};
- Data* const _data;
+ class ReverseCursor final : public SeekableRecordCursor {
+ OperationContext* opCtx;
+ const RecordStore& _rs;
+ StringStore::const_reverse_iterator it;
+ boost::optional<std::string> _savedPosition;
+ bool _needFirstSeek = true;
+ bool _lastMoveWasRestore = false;
+ VisibilityManager* _visibilityManager;
+
+ public:
+ ReverseCursor(OperationContext* opCtx,
+ const RecordStore& rs,
+ VisibilityManager* visibilityManager);
+ boost::optional<Record> next() final;
+ boost::optional<Record> seekExact(const RecordId& id) final override;
+ void save() final;
+ void saveUnpositioned() final override;
+ bool restore() final;
+ void detachFromOperationContext() final;
+ void reattachToOperationContext(OperationContext* opCtx) final;
+
+ private:
+ bool inPrefix(const std::string& key_string);
+ };
};
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
index 7cee38decf9..5889fb4c813 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
@@ -34,55 +34,71 @@
#include <memory>
#include "mongo/base/init.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
#include "mongo/db/storage/record_store_test_harness.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
+namespace ephemeral_for_test {
namespace {
-class EphemeralForTestHarnessHelper final : public RecordStoreHarnessHelper {
+class RecordStoreHarnessHelper final : public ::mongo::RecordStoreHarnessHelper {
+ KVEngine _kvEngine{};
+ VisibilityManager _visibilityManager;
+
public:
- EphemeralForTestHarnessHelper() {}
+ RecordStoreHarnessHelper() {}
- virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() {
+ virtual std::unique_ptr<mongo::RecordStore> newNonCappedRecordStore() {
return newNonCappedRecordStore("a.b");
}
- virtual std::unique_ptr<RecordStore> newNonCappedRecordStore(const std::string& ns) {
- return std::make_unique<EphemeralForTestRecordStore>(ns, &data);
+ virtual std::unique_ptr<mongo::RecordStore> newNonCappedRecordStore(const std::string& ns) {
+ return std::make_unique<RecordStore>(ns,
+ "ident"_sd /* ident */,
+ false /* isCapped */,
+ -1 /* cappedMaxSize */,
+ -1 /* cappedMaxDocs */,
+ nullptr /* cappedCallback */,
+ nullptr /* visibilityManager */);
}
- virtual std::unique_ptr<RecordStore> newCappedRecordStore(int64_t cappedSizeBytes,
- int64_t cappedMaxDocs) {
+ virtual std::unique_ptr<mongo::RecordStore> newCappedRecordStore(int64_t cappedSizeBytes,
+ int64_t cappedMaxDocs) {
return newCappedRecordStore("a.b", cappedSizeBytes, cappedMaxDocs);
}
- virtual std::unique_ptr<RecordStore> newCappedRecordStore(const std::string& ns,
- int64_t cappedSizeBytes,
- int64_t cappedMaxDocs) final {
- return std::make_unique<EphemeralForTestRecordStore>(
- ns, &data, true, cappedSizeBytes, cappedMaxDocs);
+ virtual std::unique_ptr<mongo::RecordStore> newCappedRecordStore(const std::string& ns,
+ int64_t cappedSizeBytes,
+ int64_t cappedMaxDocs) final {
+ return std::make_unique<RecordStore>(ns,
+ "ident"_sd,
+ /*isCapped*/ true,
+ cappedSizeBytes,
+ cappedMaxDocs,
+ /*cappedCallback*/ nullptr,
+ &_visibilityManager);
}
- std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
- return std::make_unique<EphemeralForTestRecoveryUnit>();
+ std::unique_ptr<mongo::RecoveryUnit> newRecoveryUnit() final {
+ return std::make_unique<RecoveryUnit>(&_kvEngine);
}
bool supportsDocLocking() final {
- return false;
+ return true;
}
-
- std::shared_ptr<void> data;
};
-std::unique_ptr<RecordStoreHarnessHelper> makeEphemeralForTestRecordStoreHarness() {
- return std::make_unique<EphemeralForTestHarnessHelper>();
+std::unique_ptr<mongo::RecordStoreHarnessHelper> makeRecordStoreHarnessHelper() {
+ return std::make_unique<RecordStoreHarnessHelper>();
}
MONGO_INITIALIZER(RegisterRecordStoreHarnessFactory)(InitializerContext* const) {
- mongo::registerRecordStoreHarnessHelperFactory(makeEphemeralForTestRecordStoreHarness);
+ mongo::registerRecordStoreHarnessHelperFactory(makeRecordStoreHarnessHelper);
return Status::OK();
}
} // namespace
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
index bd62df17240..2b684221ee8 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
@@ -31,87 +31,122 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
+#include <mutex>
-#include "mongo/db/storage/sorted_data_interface.h"
-#include "mongo/logv2/log.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
+#include "mongo/db/storage/oplog_hack.h"
namespace mongo {
+namespace ephemeral_for_test {
-EphemeralForTestRecoveryUnit::~EphemeralForTestRecoveryUnit() {
+RecoveryUnit::RecoveryUnit(KVEngine* parentKVEngine, std::function<void()> cb)
+ : _waitUntilDurableCallback(cb), _KVEngine(parentKVEngine) {}
+
+RecoveryUnit::~RecoveryUnit() {
invariant(!_inUnitOfWork(), toString(_getState()));
+ _abort();
}
-void EphemeralForTestRecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
+void RecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
invariant(!_inUnitOfWork(), toString(_getState()));
_setState(State::kInactiveInUnitOfWork);
}
-void EphemeralForTestRecoveryUnit::doCommitUnitOfWork() {
+void RecoveryUnit::doCommitUnitOfWork() {
invariant(_inUnitOfWork(), toString(_getState()));
- _setState(State::kCommitting);
- try {
- for (Changes::iterator it = _changes.begin(), end = _changes.end(); it != end; ++it) {
- (*it)->commit(boost::none);
+ if (_dirty) {
+ invariant(_forked);
+ while (true) {
+ std::pair<uint64_t, StringStore> masterInfo = _KVEngine->getMasterInfo();
+ try {
+ _workingCopy.merge3(_mergeBase, masterInfo.second);
+ } catch (const merge_conflict_exception&) {
+ throw WriteConflictException();
+ }
+
+ if (_KVEngine->trySwapMaster(_workingCopy, masterInfo.first)) {
+ // Merged successfully
+ break;
+ } else {
+ // Retry the merge, but update the mergeBase since some progress was made merging.
+ _mergeBase = masterInfo.second;
+ }
}
- _changes.clear();
- } catch (...) {
- std::terminate();
- }
-
- // This ensures that the journal listener gets called on each commit.
- // SERVER-22575: Remove this once we add a generic mechanism to periodically wait
- // for durability.
- if (_waitUntilDurableCallback) {
- _waitUntilDurableCallback();
+ _forked = false;
+ _dirty = false;
+ } else if (_forked) {
+ if (kDebugBuild)
+ invariant(_mergeBase == _workingCopy);
}
+ _setState(State::kCommitting);
+ commitRegisteredChanges(boost::none);
_setState(State::kInactive);
}
-void EphemeralForTestRecoveryUnit::doAbortUnitOfWork() {
+void RecoveryUnit::doAbortUnitOfWork() {
invariant(_inUnitOfWork(), toString(_getState()));
- _setState(State::kAborting);
-
- try {
- for (Changes::reverse_iterator it = _changes.rbegin(), end = _changes.rend(); it != end;
- ++it) {
- auto change = *it;
- LOGV2_DEBUG(22217,
- 2,
- "CUSTOM ROLLBACK {demangleName_typeid_change}",
- "demangleName_typeid_change"_attr = demangleName(typeid(*change)));
- change->rollback();
- }
- _changes.clear();
- } catch (...) {
- std::terminate();
- }
+ _abort();
+}
- _setState(State::kInactive);
+bool RecoveryUnit::waitUntilDurable(OperationContext* opCtx) {
+ invariant(!_inUnitOfWork(), toString(_getState()));
+ invariant(!opCtx->lockState()->isLocked() || storageGlobalParams.repair);
+ return true; // This is an in-memory storage engine.
}
-bool EphemeralForTestRecoveryUnit::waitUntilDurable(OperationContext* opCtx) {
- if (_waitUntilDurableCallback) {
- _waitUntilDurableCallback();
- }
- return true;
+Status RecoveryUnit::obtainMajorityCommittedSnapshot() {
+ return Status::OK();
}
-bool EphemeralForTestRecoveryUnit::inActiveTxn() const {
- return _inUnitOfWork();
+void RecoveryUnit::prepareUnitOfWork() {
+ invariant(_inUnitOfWork());
}
-void EphemeralForTestRecoveryUnit::doAbandonSnapshot() {
+void RecoveryUnit::doAbandonSnapshot() {
invariant(!_inUnitOfWork(), toString(_getState()));
+ _forked = false;
+ _dirty = false;
+}
+
+bool RecoveryUnit::forkIfNeeded() {
+ if (_forked)
+ return false;
+
+ // Update the copies of the trees when not in a WUOW so cursors can retrieve the latest data.
+
+ std::pair<uint64_t, StringStore> masterInfo = _KVEngine->getMasterInfo();
+ StringStore master = masterInfo.second;
+
+ _mergeBase = master;
+ _workingCopy = master;
+
+ _forked = true;
+ return true;
}
-Status EphemeralForTestRecoveryUnit::obtainMajorityCommittedSnapshot() {
+Status RecoveryUnit::setTimestamp(Timestamp timestamp) {
+ auto key = oploghack::keyForOptime(timestamp);
+ if (!key.isOK())
+ return key.getStatus();
+
+ _KVEngine->visibilityManager()->reserveRecord(this, key.getValue());
return Status::OK();
}
+void RecoveryUnit::setOrderedCommit(bool orderedCommit) {}
+
+void RecoveryUnit::_abort() {
+ _forked = false;
+ _dirty = false;
+ _setState(State::kAborting);
+ abortRegisteredChanges();
+ _setState(State::kInactive);
+}
-void EphemeralForTestRecoveryUnit::registerChange(std::unique_ptr<Change> change) {
- _changes.push_back(std::move(change));
+RecoveryUnit* RecoveryUnit::get(OperationContext* opCtx) {
+ return checked_cast<ephemeral_for_test::RecoveryUnit*>(opCtx->recoveryUnit());
}
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h
index 568a8fd19a5..327c40e86b8 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h
@@ -33,32 +33,31 @@
#include <vector>
#include "mongo/db/record_id.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
#include "mongo/db/storage/recovery_unit.h"
namespace mongo {
+namespace ephemeral_for_test {
-class SortedDataInterface;
-
-class EphemeralForTestRecoveryUnit : public RecoveryUnit {
+class RecoveryUnit : public ::mongo::RecoveryUnit {
public:
- EphemeralForTestRecoveryUnit(std::function<void()> cb = nullptr)
- : _waitUntilDurableCallback(cb) {}
+ RecoveryUnit(KVEngine* parentKVEngine, std::function<void()> cb = nullptr);
+ ~RecoveryUnit();
- virtual ~EphemeralForTestRecoveryUnit();
+ void beginUnitOfWork(OperationContext* opCtx) override final;
- void beginUnitOfWork(OperationContext* opCtx) final;
+ bool inActiveTxn() const {
+ return _inUnitOfWork();
+ }
- virtual bool waitUntilDurable(OperationContext* opCtx);
+ virtual bool waitUntilDurable(OperationContext* opCtx) override;
- bool inActiveTxn() const;
+ virtual void setOrderedCommit(bool orderedCommit) override;
Status obtainMajorityCommittedSnapshot() final;
- virtual void registerChange(std::unique_ptr<Change> change);
-
- virtual void setOrderedCommit(bool orderedCommit) {}
-
- virtual void prepareUnitOfWork() override {}
+ void prepareUnitOfWork() override;
virtual void setPrepareTimestamp(Timestamp ts) override {
_prepareTimestamp = ts;
@@ -80,19 +79,47 @@ public:
_commitTimestamp = Timestamp::min();
}
+ Status setTimestamp(Timestamp timestamp) override;
+
+ // Ephemeral for test specific function declarations below.
+ StringStore* getHead() {
+ forkIfNeeded();
+ return &_workingCopy;
+ }
+
+ inline void makeDirty() {
+ _dirty = true;
+ }
+
+ /**
+ * Checks if there already exists a current working copy and merge base; if not fetches
+ * one and creates them.
+ */
+ bool forkIfNeeded();
+
+ static RecoveryUnit* get(OperationContext* opCtx);
+
private:
- void doCommitUnitOfWork() final;
- void doAbortUnitOfWork() final;
+ void doCommitUnitOfWork() override final;
+
+ void doAbortUnitOfWork() override final;
- void doAbandonSnapshot() final;
+ void doAbandonSnapshot() override final;
- typedef std::vector<std::shared_ptr<Change>> Changes;
+ void _abort();
- Changes _changes;
std::function<void()> _waitUntilDurableCallback;
+ // Official master is kept by KVEngine
+ KVEngine* _KVEngine;
+ StringStore _mergeBase;
+ StringStore _workingCopy;
+
+ bool _forked = false;
+ bool _dirty = false; // Whether or not we have written to this _workingCopy.
Timestamp _prepareTimestamp = Timestamp::min();
Timestamp _commitTimestamp = Timestamp::min();
};
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_recovery_unit_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit_test.cpp
index 9af948c7be3..d796c783c4e 100644
--- a/src/mongo/db/storage/biggie/biggie_recovery_unit_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit_test.cpp
@@ -33,17 +33,17 @@
#include "mongo/base/init.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/biggie/biggie_kv_engine.h"
-#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
#include "mongo/db/storage/recovery_unit_test_harness.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
namespace {
-class BiggieRecoveryUnitHarnessHelper final : public RecoveryUnitHarnessHelper {
+class RecoveryUnitHarnessHelper final : public mongo::RecoveryUnitHarnessHelper {
public:
- BiggieRecoveryUnitHarnessHelper() = default;
+ RecoveryUnitHarnessHelper() = default;
virtual std::unique_ptr<mongo::RecoveryUnit> newRecoveryUnit() final {
return std::make_unique<RecoveryUnit>(&_kvEngine);
@@ -63,15 +63,15 @@ private:
KVEngine _kvEngine{};
};
-std::unique_ptr<RecoveryUnitHarnessHelper> makeBiggieRecoveryUnitHarnessHelper() {
- return std::make_unique<BiggieRecoveryUnitHarnessHelper>();
+std::unique_ptr<mongo::RecoveryUnitHarnessHelper> makeRecoveryUnitHarnessHelper() {
+ return std::make_unique<RecoveryUnitHarnessHelper>();
}
MONGO_INITIALIZER(RegisterRecoveryUnitHarnessFactory)(InitializerContext* const) {
- mongo::registerRecoveryUnitHarnessHelperFactory(makeBiggieRecoveryUnitHarnessHelper);
+ mongo::registerRecoveryUnitHarnessHelperFactory(makeRecoveryUnitHarnessHelper);
return Status::OK();
}
} // namespace
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_server_status.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_server_status.cpp
index f2566a59efe..32939677129 100644
--- a/src/mongo/db/storage/biggie/biggie_server_status.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_server_status.cpp
@@ -31,31 +31,31 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/storage/biggie/biggie_server_status.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_server_status.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/concurrency/d_concurrency.h"
-#include "mongo/db/storage/biggie/biggie_kv_engine.h"
-#include "mongo/db/storage/biggie/biggie_record_store.h"
-#include "mongo/db/storage/biggie/store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h"
#include "mongo/logv2/log.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
-BiggieServerStatusSection::BiggieServerStatusSection(KVEngine* engine)
- : ServerStatusSection(kBiggieEngineName), _engine(engine) {}
+ServerStatusSection::ServerStatusSection(KVEngine* engine)
+ : mongo::ServerStatusSection(kEngineName), _engine(engine) {}
-bool BiggieServerStatusSection::includeByDefault() const {
+bool ServerStatusSection::includeByDefault() const {
return true;
}
-BSONObj BiggieServerStatusSection::generateSection(OperationContext* opCtx,
- const BSONElement& configElement) const {
+BSONObj ServerStatusSection::generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
Lock::GlobalLock lk(
opCtx, LockMode::MODE_IS, Date_t::now(), Lock::InterruptBehavior::kLeaveUnlocked);
if (!lk.isLocked()) {
- LOGV2_DEBUG(4919800, 2, "Failed to retrieve biggie statistics");
+ LOGV2_DEBUG(4919800, 2, "Failed to retrieve ephemeralForTest statistics");
return BSONObj();
}
@@ -67,5 +67,5 @@ BSONObj BiggieServerStatusSection::generateSection(OperationContext* opCtx,
return bob.obj();
}
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_server_status.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_server_status.h
index 4e1264af4ab..12868f8a7cc 100644
--- a/src/mongo/db/storage/biggie/biggie_server_status.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_server_status.h
@@ -32,16 +32,16 @@
#include "mongo/db/commands/server_status.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
class KVEngine;
/**
* Adds "biggie" to the results of db.serverStatus().
*/
-class BiggieServerStatusSection : public ServerStatusSection {
+class ServerStatusSection : public mongo::ServerStatusSection {
public:
- BiggieServerStatusSection(KVEngine* engine);
+ ServerStatusSection(KVEngine* engine);
bool includeByDefault() const override;
BSONObj generateSection(OperationContext* opCtx,
const BSONElement& configElement) const override;
@@ -50,5 +50,5 @@ private:
KVEngine* _engine;
};
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp
index 4a803bae827..a4737f23e03 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp
@@ -41,9 +41,9 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
-#include "mongo/db/storage/biggie/biggie_sorted_impl.h"
-#include "mongo/db/storage/biggie/store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.h"
#include "mongo/db/storage/index_entry_comparison.h"
#include "mongo/db/storage/key_string.h"
#include "mongo/util/bufreader.h"
@@ -52,7 +52,7 @@
#include "mongo/util/str.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
namespace {
// Helper to interpret index data buffer
@@ -1331,7 +1331,7 @@ void SortedDataInterfaceUnique::unindex(OperationContext* opCtx,
// This function is, as of now, not in the interface, but there exists a server ticket to add
// truncate to the list of commands able to be used.
Status SortedDataInterfaceBase::truncate(mongo::RecoveryUnit* ru) {
- auto bRu = checked_cast<biggie::RecoveryUnit*>(ru);
+ auto bRu = checked_cast<ephemeral_for_test::RecoveryUnit*>(ru);
StringStore* workingCopy(bRu->getHead());
std::vector<std::string> toDelete;
auto end = workingCopy->upper_bound(_KSForIdentEnd);
@@ -1529,5 +1529,5 @@ std::unique_ptr<mongo::SortedDataInterface::Cursor> SortedDataInterfaceStandard:
_KSForIdentEnd);
}
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.h
index c2d8919a08d..d0dbe23d92d 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.h
@@ -30,12 +30,12 @@
#include "mongo/db/index/index_descriptor_fwd.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/biggie/store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/sorted_data_interface.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
class SortedDataBuilderBase : public SortedDataBuilderInterface {
public:
@@ -148,5 +148,5 @@ public:
std::unique_ptr<mongo::SortedDataInterface::Cursor> newCursor(
OperationContext* opCtx, bool isForward = true) const override;
};
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl_test.cpp
index a5e5127e0a7..6535535505b 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl_test.cpp
@@ -29,30 +29,30 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/storage/biggie/biggie_sorted_impl.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.h"
#include <memory>
#include "mongo/base/init.h"
#include "mongo/db/catalog/collection_mock.h"
#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/storage/biggie/biggie_kv_engine.h"
-#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
-#include "mongo/db/storage/biggie/store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_radix_store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
#include "mongo/db/storage/sorted_data_interface_test_harness.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
namespace {
-class BiggieSortedDataInterfaceTestHarnessHelper final
- : public virtual SortedDataInterfaceHarnessHelper {
+class SortedDataInterfaceTestHarnessHelper final
+ : public virtual mongo::SortedDataInterfaceHarnessHelper {
public:
- BiggieSortedDataInterfaceTestHarnessHelper() : _order(Ordering::make(BSONObj())) {}
+ SortedDataInterfaceTestHarnessHelper() : _order(Ordering::make(BSONObj())) {}
std::unique_ptr<mongo::SortedDataInterface> newIdIndexSortedDataInterface() final {
- std::string ns = "test.biggie";
+ std::string ns = "test.ephemeral_for_test";
OperationContextNoop opCtx(newRecoveryUnit().release());
BSONObj spec = BSON("key" << BSON("_id" << 1) << "name"
@@ -69,7 +69,7 @@ public:
std::unique_ptr<mongo::SortedDataInterface> newSortedDataInterface(bool unique,
bool partial) final {
- std::string ns = "test.biggie";
+ std::string ns = "test.ephemeral_for_test";
OperationContextNoop opCtx(newRecoveryUnit().release());
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
@@ -98,16 +98,14 @@ private:
std::list<IndexDescriptor> _descs;
};
-std::unique_ptr<mongo::SortedDataInterfaceHarnessHelper>
-makeBiggieSortedDataInterfaceHarnessHelper() {
- return std::make_unique<BiggieSortedDataInterfaceTestHarnessHelper>();
+std::unique_ptr<mongo::SortedDataInterfaceHarnessHelper> makeSortedDataInterfaceHarnessHelper() {
+ return std::make_unique<SortedDataInterfaceTestHarnessHelper>();
}
MONGO_INITIALIZER(RegisterSortedDataInterfaceHarnessFactory)(InitializerContext* const) {
- mongo::registerSortedDataInterfaceHarnessHelperFactory(
- makeBiggieSortedDataInterfaceHarnessHelper);
+ mongo::registerSortedDataInterfaceHarnessHelperFactory(makeSortedDataInterfaceHarnessHelper);
return Status::OK();
}
} // namespace
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp
index edd656abc77..3a20f37f214 100644
--- a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp
@@ -33,12 +33,12 @@
#include <algorithm>
-#include "mongo/db/storage/biggie/biggie_record_store.h"
-#include "mongo/db/storage/biggie/biggie_visibility_manager.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h"
+#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.h"
#include "mongo/db/storage/recovery_unit.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
/**
* Used by the visibility manager to register changes when the RecoveryUnit either commits or
@@ -121,5 +121,5 @@ void VisibilityManager::waitForAllEarlierOplogWritesToBeVisible(OperationContext
});
}
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.h
index b47097a0e7e..3380e13adaa 100644
--- a/src/mongo/db/storage/biggie/biggie_visibility_manager.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.h
@@ -35,7 +35,7 @@
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
-namespace biggie {
+namespace ephemeral_for_test {
class RecordStore;
@@ -91,5 +91,5 @@ private:
std::set<RecordId> _uncommittedRecords; // RecordIds that have yet to be committed/rolled back.
};
-} // namespace biggie
+} // namespace ephemeral_for_test
} // namespace mongo
diff --git a/src/mongo/db/storage/kv/storage_engine_test.cpp b/src/mongo/db/storage/kv/storage_engine_test.cpp
index 8ef820be754..b34c3acd2ce 100644
--- a/src/mongo/db/storage/kv/storage_engine_test.cpp
+++ b/src/mongo/db/storage/kv/storage_engine_test.cpp
@@ -44,7 +44,6 @@
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/storage/devnull/devnull_kv_engine.h"
#include "mongo/db/storage/durable_catalog.h"
-#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h"
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/storage_engine_impl.h"
#include "mongo/db/storage/storage_engine_test_fixture.h"
@@ -80,8 +79,15 @@ TEST_F(StorageEngineTest, ReconcileIdentsTest) {
ASSERT_TRUE(idents.find("_mdb_catalog") != idents.end());
// Create a catalog entry for the `_id` index. Drop the created the table.
- ASSERT_OK(createIndex(
- opCtx.get(), NamespaceString("db.coll1"), "_id", false /* isBackgroundSecondaryBuild */));
+ {
+ WriteUnitOfWork wuow(opCtx.get());
+ ASSERT_OK(createIndex(opCtx.get(),
+ NamespaceString("db.coll1"),
+ "_id",
+ false /* isBackgroundSecondaryBuild */));
+ wuow.commit();
+ }
+
ASSERT_OK(dropIndexTable(opCtx.get(), NamespaceString("db.coll1"), "_id"));
// The reconcile response should include this index as needing to be rebuilt.
reconcileResult = unittest::assertGet(reconcile(opCtx.get()));
@@ -206,7 +212,12 @@ TEST_F(StorageEngineTest, ReconcileUnfinishedIndex) {
// Start an non-backgroundSecondary single-phase (i.e. no build UUID) index.
const bool isBackgroundSecondaryBuild = false;
const boost::optional<UUID> buildUUID = boost::none;
- ASSERT_OK(startIndexBuild(opCtx.get(), ns, indexName, isBackgroundSecondaryBuild, buildUUID));
+ {
+ WriteUnitOfWork wuow(opCtx.get());
+ ASSERT_OK(
+ startIndexBuild(opCtx.get(), ns, indexName, isBackgroundSecondaryBuild, buildUUID));
+ wuow.commit();
+ }
const auto indexIdent = _storageEngine->getCatalog()->getIndexIdent(
opCtx.get(), swCollInfo.getValue().catalogId, indexName);
@@ -238,7 +249,12 @@ TEST_F(StorageEngineTest, ReconcileUnfinishedBackgroundSecondaryIndex) {
// Start a backgroundSecondary single-phase (i.e. no build UUID) index.
const bool isBackgroundSecondaryBuild = true;
const boost::optional<UUID> buildUUID = boost::none;
- ASSERT_OK(startIndexBuild(opCtx.get(), ns, indexName, isBackgroundSecondaryBuild, buildUUID));
+ {
+ WriteUnitOfWork wuow(opCtx.get());
+ ASSERT_OK(
+ startIndexBuild(opCtx.get(), ns, indexName, isBackgroundSecondaryBuild, buildUUID));
+ wuow.commit();
+ }
const auto indexIdent = _storageEngine->getCatalog()->getIndexIdent(
opCtx.get(), swCollInfo.getValue().catalogId, indexName);
@@ -279,8 +295,16 @@ TEST_F(StorageEngineTest, ReconcileTwoPhaseIndexBuilds) {
// Start two indexes with the same buildUUID to simulate building multiple indexes within the
// same build.
- ASSERT_OK(startIndexBuild(opCtx.get(), ns, indexA, isBackgroundSecondaryBuild, buildUUID));
- ASSERT_OK(startIndexBuild(opCtx.get(), ns, indexB, isBackgroundSecondaryBuild, buildUUID));
+ {
+ WriteUnitOfWork wuow(opCtx.get());
+ ASSERT_OK(startIndexBuild(opCtx.get(), ns, indexA, isBackgroundSecondaryBuild, buildUUID));
+ wuow.commit();
+ }
+ {
+ WriteUnitOfWork wuow(opCtx.get());
+ ASSERT_OK(startIndexBuild(opCtx.get(), ns, indexB, isBackgroundSecondaryBuild, buildUUID));
+ wuow.commit();
+ }
const auto indexIdentA = _storageEngine->getCatalog()->getIndexIdent(
opCtx.get(), swCollInfo.getValue().catalogId, indexA);
@@ -368,7 +392,12 @@ TEST_F(StorageEngineRepairTest, LoadCatalogRecoversOrphansInCatalog) {
// Only drop the catalog entry; storage engine still knows about this ident.
// This simulates an unclean shutdown happening between dropping the catalog entry and
// the actual drop in storage engine.
- ASSERT_OK(removeEntry(opCtx.get(), collNs.ns(), _storageEngine->getCatalog()));
+ {
+ WriteUnitOfWork wuow(opCtx.get());
+ ASSERT_OK(removeEntry(opCtx.get(), collNs.ns(), _storageEngine->getCatalog()));
+ wuow.commit();
+ }
+
ASSERT(!collectionExists(opCtx.get(), collNs));
// When in a repair context, loadCatalog() recreates catalog entries for orphaned idents.
@@ -396,7 +425,11 @@ TEST_F(StorageEngineTest, LoadCatalogDropsOrphans) {
// Only drop the catalog entry; storage engine still knows about this ident.
// This simulates an unclean shutdown happening between dropping the catalog entry and
// the actual drop in storage engine.
- ASSERT_OK(removeEntry(opCtx.get(), collNs.ns(), _storageEngine->getCatalog()));
+ {
+ WriteUnitOfWork wuow(opCtx.get());
+ ASSERT_OK(removeEntry(opCtx.get(), collNs.ns(), _storageEngine->getCatalog()));
+ wuow.commit();
+ }
ASSERT(!collectionExists(opCtx.get(), collNs));
// When in a normal startup context, loadCatalog() does not recreate catalog entries for
diff --git a/src/mongo/db/storage/storage_engine_test_fixture.h b/src/mongo/db/storage/storage_engine_test_fixture.h
index a6b5ed024cd..4401855b9fc 100644
--- a/src/mongo/db/storage/storage_engine_test_fixture.h
+++ b/src/mongo/db/storage/storage_engine_test_fixture.h
@@ -63,9 +63,12 @@ public:
options.uuid = UUID::gen();
RecordId catalogId;
std::unique_ptr<RecordStore> rs;
- std::tie(catalogId, rs) = unittest::assertGet(
- _storageEngine->getCatalog()->createCollection(opCtx, ns, options, true));
-
+ {
+ WriteUnitOfWork wuow(opCtx);
+ std::tie(catalogId, rs) = unittest::assertGet(
+ _storageEngine->getCatalog()->createCollection(opCtx, ns, options, true));
+ wuow.commit();
+ }
std::unique_ptr<Collection> coll = std::make_unique<CollectionMock>(ns, catalogId);
CollectionCatalog::get(opCtx).registerCollection(options.uuid.get(), &coll);
diff --git a/src/mongo/db/storage/storage_repair_observer_test.cpp b/src/mongo/db/storage/storage_repair_observer_test.cpp
index 310f48422f0..2fe3cc786d0 100644
--- a/src/mongo/db/storage/storage_repair_observer_test.cpp
+++ b/src/mongo/db/storage/storage_repair_observer_test.cpp
@@ -101,6 +101,11 @@ public:
return StorageRepairObserver::get(getServiceContext());
}
+ void setUp() {
+ ServiceContextMongoDTest::setUp();
+ storageGlobalParams.repair = true;
+ }
+
void tearDown() {
auto repairObserver = getRepairObserver();
if (_assertRepairIncompleteOnTearDown) {
@@ -117,6 +122,7 @@ public:
"mod_getDescription"_attr = mod.getDescription());
}
}
+ storageGlobalParams.repair = false;
}
private:
diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript
index cd8ebc59871..96254643237 100644
--- a/src/mongo/dbtests/SConscript
+++ b/src/mongo/dbtests/SConscript
@@ -162,7 +162,6 @@ if not has_option('noshell') and usemozjs:
"$BUILD_DIR/mongo/db/repl/timestamp_block",
"$BUILD_DIR/mongo/db/server_options_core",
"$BUILD_DIR/mongo/db/sessions_collection_standalone",
- "$BUILD_DIR/mongo/db/storage/biggie/storage_biggie",
"$BUILD_DIR/mongo/db/storage/durable_catalog_impl",
"$BUILD_DIR/mongo/db/storage/ephemeral_for_test/storage_ephemeral_for_test",
"$BUILD_DIR/mongo/db/storage/storage_debug_util",