author     Geert Bosch <geert@mongodb.com>   2015-03-10 13:42:08 -0500
committer  Geert Bosch <geert@mongodb.com>   2015-03-11 18:03:04 -0400
commit     b4f647796a0c37fbb95d71aa9ee9b63fcbfcb782
tree       d5edaf5ae175309b69af82ccc9c578900e3cad93
parent     f4d17dd81431f9724006c0837ccac44068971b1d
download   mongo-b4f647796a0c37fbb95d71aa9ee9b63fcbfcb782.tar.gz
SERVER-17391: Move RocksDB storage engine integration layer to new module
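
With this change the in-tree --rocksdb SCons option goes away and the RocksDB
integration layer lives in the external mongo-rocks module. A minimal sketch of
what the ubuntu1404-rocksdb buildvariant below automates (paths and toolchain
locations are taken from its compile_flags; the module checkout step is covered
after the evergreen.yml hunk):

    # Build RocksDB's static library in a sibling directory, as the
    # "build rocksdb" Evergreen function does.
    git clone https://github.com/facebook/rocksdb.git ../rocksdb
    (cd ../rocksdb && make static_lib)

    # Compile mongod against it; note the absence of a --rocksdb flag.
    scons --c++11 -j$(grep -c ^processor /proc/cpuinfo) \
        --cpppath=$(readlink -f ../rocksdb/include/) \
        --libpath=$(readlink -f ../rocksdb/) --extralib=rocksdb \
        --cc=/opt/mongodbtoolchain/bin/gcc --cxx=/opt/mongodbtoolchain/bin/g++ all
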
-rw-r--r--  SConstruct                                                |    1
-rw-r--r--  etc/evergreen-rocksdb.yml                                 | 1371
-rw-r--r--  etc/evergreen.yml                                         |   52
-rw-r--r--  src/mongo/SConscript                                      |    4
-rw-r--r--  src/mongo/db/storage/rocks/SConscript                     |   89
-rw-r--r--  src/mongo/db/storage/rocks/rocks_engine.cpp               |  304
-rw-r--r--  src/mongo/db/storage/rocks/rocks_engine.h                 |  160
-rw-r--r--  src/mongo/db/storage/rocks/rocks_engine_test.cpp          |   69
-rw-r--r--  src/mongo/db/storage/rocks/rocks_global_options.cpp       |   83
-rw-r--r--  src/mongo/db/storage/rocks/rocks_global_options.h         |   52
-rw-r--r--  src/mongo/db/storage/rocks/rocks_index.cpp                |  741
-rw-r--r--  src/mongo/db/storage/rocks/rocks_index.h                  |  115
-rw-r--r--  src/mongo/db/storage/rocks/rocks_index_test.cpp           |  150
-rw-r--r--  src/mongo/db/storage/rocks/rocks_init.cpp                 |  123
-rw-r--r--  src/mongo/db/storage/rocks/rocks_options_init.cpp         |   56
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store.cpp         |  903
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store.h           |  273
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store_mock.cpp    |   41
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store_mongod.cpp  |  172
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store_test.cpp    |  461
-rw-r--r--  src/mongo/db/storage/rocks/rocks_recovery_unit.cpp        |  332
-rw-r--r--  src/mongo/db/storage/rocks/rocks_recovery_unit.h          |  157
-rw-r--r--  src/mongo/db/storage/rocks/rocks_server_status.cpp        |  116
-rw-r--r--  src/mongo/db/storage/rocks/rocks_server_status.h          |   51
-rw-r--r--  src/mongo/db/storage/rocks/rocks_transaction.cpp          |  160
-rw-r--r--  src/mongo/db/storage/rocks/rocks_transaction.h            |  132
-rw-r--r--  src/mongo/db/storage/rocks/rocks_util.h                   |   48
27 files changed, 50 insertions, 6166 deletions
diff --git a/SConstruct b/SConstruct
index 82ab31a102a..1456ac071f5 100644
--- a/SConstruct
+++ b/SConstruct
@@ -226,7 +226,6 @@ add_option( "extralib", "comma separated list of libraries (--extralib js_stati
add_option( "mm", "use main memory instead of memory mapped files" , 0 , True )
add_option( "ssl" , "Enable SSL" , 0 , True )
add_option( "ssl-fips-capability", "Enable the ability to activate FIPS 140-2 mode", 0, True );
-add_option( "rocksdb" , "Enable RocksDB" , 0 , False )
add_option( "wiredtiger", "Enable wiredtiger", "?", True, "wiredtiger",
type="choice", choices=["on", "off"], const="on", default="on")
diff --git a/etc/evergreen-rocksdb.yml b/etc/evergreen-rocksdb.yml
deleted file mode 100644
index 7497ea0b5ae..00000000000
--- a/etc/evergreen-rocksdb.yml
+++ /dev/null
@@ -1,1371 +0,0 @@
-#####################################################
-# A note on expansions #
-#####################################################
-
-# Expansions usually appear in the form ${key|default}
-# If 'key' is found in the executor's map of currently known
-# expansions, the corresponding value is used. If the key can
-# not be found, the default is used.
-#
- # Arbitrary expansions can be specified in the YAML configuration
-# files in the following places:
-# - The 'expansions' field for buildvariants (branch file)
-# - The 'expansions' field for distros (distros file)
-#
-# A number of 'built-in' expansions are also available for use; these include:
-# - environment variables available on the host machine
-# - 'workdir' (references the executor's work directory).
-# - 'task_id' (references the task id of the task the executor is working on).
-# - 'build_variant' (references the executing task's buildvariant).
-# - 'config_root' (references the root directory for the executor's configuration artifacts).
-# Others include:
-# - 'builder'
-# - 'builder_num'
-# - 'builde
-
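-# A worked illustration (hypothetical expansion map, not from a real distro
-# file): given expansions {python: python2.7}, a script line written as
-#   ${python|python} buildscripts/smoke.py jsCore
-# runs as "python2.7 buildscripts/smoke.py jsCore"; on a host with no
-# 'python' expansion defined, the default after the '|' applies and the
-# line runs as "python buildscripts/smoke.py jsCore".
-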
-functions:
- "fetch source" :
- command: git.get_project
- params:
- directory: src
- "get buildnumber":
- command: keyval.inc
- params:
- key: "${build_variant}_master"
- destination: "builder_num"
-
- "fetch binaries" :
- command: s3.get
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- remote_file: mongodb-mongo-master/${build_variant}/${revision}/binaries/mongo-${build_id}.${ext|tgz}
- bucket: mciuploads
- local_file: src/mongo-binaries.tgz
-
- "setup credentials" :
- command: shell.exec
- params:
- working_dir: src
- silent: true
- script: |
- cat > mci.buildlogger <<END_OF_CREDS
- slavename='${slave}'
- passwd='${passwd}'
- END_OF_CREDS
-
- "upload debugsymbols" :
- command: s3.put
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- local_file: src/mongo-debugsymbols.tgz
- remote_file: mongodb-mongo-master/${build_variant}/${revision}/debugsymbols/debugsymbols-${build_id}.${ext|tgz}
- bucket: mciuploads
- permissions: public-read
- content_type: ${content_type|application/x-gzip}
- build_variants:
- ['enterprise-linux-64-amazon-ami',
- 'enterprise-rhel-57-64-bit',
- 'enterprise-rhel-62-64-bit',
- 'enterprise-rhel-70-64-bit',
- 'enterprise-suse11-64',
- 'enterprise-ubuntu1204-64',
- 'enterprise-ubuntu1404-64',
- 'enterprise-debian71-64',
- 'linux-32',
- 'linux-64',
- 'amazon',
- 'rhel55',
- 'rhel62',
- 'rhel70',
- 'suse11',
- 'ubuntu1204',
- 'ubuntu1404',
- 'debian71',
- 'solaris-64-bit']
- "fetch debugsymbols archive" :
- command: s3.get
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- remote_file: mongodb-mongo-master/${build_variant}/${revision}/debugsymbols/debugsymbols-${build_id}.${ext|tgz}
- bucket: mciuploads
- local_file: src/mongo-debugsymbols.tgz
- build_variants:
- ['enterprise-linux-64-amazon-ami',
- 'enterprise-rhel-57-64-bit',
- 'enterprise-rhel-62-64-bit',
- 'enterprise-rhel-70-64-bit',
- 'enterprise-suse11-64',
- 'enterprise-ubuntu1204-64',
- 'enterprise-ubuntu1404-64',
- 'enterprise-debian71-64',
- 'linux-32',
- 'linux-64',
- 'amazon',
- 'rhel55',
- 'rhel62',
- 'rhel70',
- 'suse11',
- 'ubuntu1204',
- 'ubuntu1404',
- 'debian71',
- 'solaris-64-bit']
- "extract binaries" :
- command: shell.exec
- params:
- working_dir: src
- script: |
- ${decompress|unzip} mongo-binaries.tgz
- "fetch artifacts" :
- command: s3.get
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- remote_file: mongodb-mongo-master/${build_variant}/${revision}/artifacts/${build_id}.tgz
- bucket: mciuploads
- extract_to: src
- "build rocksdb" :
- command: shell.exec
- params:
- script: |
- set -o errexit
- set -o verbose
- if [ "${build_rocksdb|}" = "true" ]; then
- rm -rf rocksdb
- git clone https://github.com/facebook/rocksdb.git
- cd rocksdb
- make static_lib
- fi
- "build new tools" :
- command: shell.exec
- params:
- working_dir: src
- script: |
- set -o verbose
-
- # create the target directory for the binaries
- mkdir -p src/mongo-tools
-
- # clone into a different path so the binaries and package directory
- # names do not conflict
- ${gitvars} git clone https://github.com/mongodb/mongo-tools.git src/mongo-tools-repo
- cd src/mongo-tools-repo
-
- # make sure newlines in the scripts are handled correctly by windows
- if [ "Windows_NT" = "$OS" ]; then
- set -o igncr
- fi;
-
- git checkout r2.8.0-rc5
- . ./${set_tools_gopath|set_gopath.sh}
-
- for i in bsondump mongostat mongofiles mongoexport mongoimport mongorestore mongodump mongotop mongooplog; do
- ${gorootvars} go build -ldflags "-X github.com/mongodb/mongo-tools/common/options.Gitspec $(git rev-parse HEAD)" ${tooltags|} -o "../mongo-tools/$i${exe|}" $i/main/$i.go
- echo $i;
- done
- exit 0
-
-pre:
- - command: shell.exec
- params:
- silent: true
- script: |
- ${killall_mci|pkill -9 mongod; pkill -9 mongos; pkill -9 mongo; pkill -9 bsondump; pkill -9 mongoimport; pkill -9 mongoexport; pkill -9 mongodump; pkill -9 mongorestore; pkill -9 mongostat; pkill -9 mongofiles; pkill -9 mongooplog; pkill -9 mongotop; pkill -9 mongobridge; pkill -9 mongod-2.6; pkill -9 mongos-2.6; pkill -9 mongo-2.6; pkill -9 bsondump-2.6; pkill -9 mongoimport-2.6; pkill -9 mongoexport-2.6; pkill -9 mongodump-2.6; pkill -9 mongorestore-2.6; pkill -9 mongostat-2.6; pkill -9 mongofiles-2.6; pkill -9 mongooplog-2.6; pkill -9 mongotop-2.6; pkill -9 mongobridge-2.6; pkill -9 mongod-2.4; pkill -9 mongos-2.4; pkill -9 mongo-2.4; pkill -9 bsondump-2.4; pkill -9 mongoimport-2.4; pkill -9 mongoexport-2.4; pkill -9 mongodump-2.4; pkill -9 mongorestore-2.4; pkill -9 mongostat-2.4; pkill -9 mongofiles-2.4; pkill -9 mongooplog-2.4; pkill -9 mongotop-2.4; pkill -9 buildlogger.py; pkill -9 smoke.py; pkill -9 python; pkill -9 cl; pkill -9 lock_mgr_test; pkill -9 background_job_test; pkill -9 repl_coordinator_impl_heartbeat_test} >/dev/null 2>&1
- rm -rf src /data/db/*
- exit 0
-
-post:
- - command: attach.results
- params:
- file_location: src/report.json
- - command: shell.exec
- params:
- silent: true
- script: |
- ${killall_mci|pkill -9 mongod; pkill -9 mongos; pkill -9 mongo; pkill -9 bsondump; pkill -9 mongoimport; pkill -9 mongoexport; pkill -9 mongodump; pkill -9 mongorestore; pkill -9 mongostat; pkill -9 mongofiles; pkill -9 mongooplog; pkill -9 mongotop; pkill -9 mongobridge; pkill -9 mongod-2.6; pkill -9 mongos-2.6; pkill -9 mongo-2.6; pkill -9 bsondump-2.6; pkill -9 mongoimport-2.6; pkill -9 mongoexport-2.6; pkill -9 mongodump-2.6; pkill -9 mongorestore-2.6; pkill -9 mongostat-2.6; pkill -9 mongofiles-2.6; pkill -9 mongooplog-2.6; pkill -9 mongotop-2.6; pkill -9 mongobridge-2.6; pkill -9 mongod-2.4; pkill -9 mongos-2.4; pkill -9 mongo-2.4; pkill -9 bsondump-2.4; pkill -9 mongoimport-2.4; pkill -9 mongoexport-2.4; pkill -9 mongodump-2.4; pkill -9 mongorestore-2.4; pkill -9 mongostat-2.4; pkill -9 mongofiles-2.4; pkill -9 mongooplog-2.4; pkill -9 mongotop-2.4; pkill -9 buildlogger.py; pkill -9 smoke.py; pkill -9 python; pkill -9 cl; pkill -9 lock_mgr_test; pkill -9 background_job_test; pkill -9 repl_coordinator_impl_heartbeat_test} >/dev/null 2>&1
- exit 0
-
-timeout:
- - command: shell.exec
- params:
- working_dir: src
- script: |
- echo "Calling the Hang_Analyzer."
- python ./buildscripts/hang_analyzer.py
-
-### tasks to be run for the branch ###
-
-tasks:
-
-## compile ##
-- name: compile
- depends_on: []
- commands:
- - command: git.get_project
- params:
- directory: src
- - command: git.apply_patch
- params:
- directory: src
- - func: "get buildnumber"
- - func: "setup credentials"
- - func: "build new tools" #noop if ${newtools} is not "true"
- - func: "build rocksdb" # noop if ${build_rocksdb} is not "true"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- set -o errexit
- set -o verbose
- rm -rf ${install_directory|/data/mongo-install-directory}
- ${scons|scons} ${compile_flags|} --use-new-tools all dist ${msi_target|}
- ${python|python} -c "ver = open('version.txt','r').read().strip(); print 'suffix: latest' if ver[-1] == '-' else 'suffix: ' + ver; print 'version: ' + ver" > compile_expansions.yml
- if [ "${has_debugsymbols|}" = "true" ]; then ${scons|scons} ${compile_flags|} --nostrip --use-new-tools dist; original_filename=$(ls | grep debugsymbols); mv $original_filename $(echo $original_filename | sed 's/debugsymbols-//' | sed 's/mongodb/debugsymbols-mongodb/'); fi
- ${python|python} buildscripts/smoke.py --with-cleanbb --mode files --from-file build/unittests.txt --dont-start-mongod --report-file report.json --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} core
- mv mongodb*.${ext|tgz} mongodb-binaries.tgz
- mv debugsymbols-*.${ext|tgz} mongo-debugsymbols.tgz || true
- - command: archive.targz_pack
- params:
- target: "artifacts.tgz"
- source_dir: "src"
- include:
- - "src/mongo/db/modules/enterprise/jstests/**"
- - "compile_expansions.yml"
- - "src/mongo/db/modules/subscription/jstests/**"
- - "src/mongo/db/modules/enterprise/docs/**"
- - "*.exe"
- - "jstests/**"
- - "./test*"
- - "./dbtest*"
- - "./mongobridge*"
- - "buildscripts/**"
- - "*Example"
- - "*Test"
- - "./**.pdb"
- - "./**.msi"
- exclude_files:
- - "*_test.pdb"
- - func: "upload debugsymbols"
- - command: s3.put
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- local_file: src/mongodb-binaries.tgz
- remote_file: mongodb-mongo-master/${build_variant}/${revision}/binaries/mongo-${build_id}.${ext|tgz}
- bucket: mciuploads
- permissions: public-read
- content_type: ${content_type|application/x-gzip}
- display_name: Binaries
- - command: s3.put
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- local_file: artifacts.tgz
- remote_file: mongodb-mongo-master/${build_variant}/${revision}/artifacts/${build_id}.tgz
- bucket: mciuploads
- permissions: public-read
- content_type: application/tar
- display_name: Artifacts
-
-- name: lint
- depends_on: []
- commands:
- - command: git.get_project
- params:
- directory: src
- - command: git.apply_patch
- params:
- directory: src
- - command: shell.exec
- params:
- working_dir: src
- script: |
- set -o errexit
- set -o verbose
- ${scons|scons} ${compile_flags|} lint
-
-## integration test suites ##
-
-- name: aggregation
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} aggregation
-
-- name: aggregation_auth
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --auth aggregation
-
-- name: audit
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} audit
-
-- name: auth
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} auth
-
-- name: dbtest
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} dbtest
-
-- name: disk
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} disk
-
-- name: durability
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} dur
-
-- name: failpoints
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} failPoint
-
-- name: failpoints_auth
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --auth failPoint
-
-- name: gle_auth
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} gle --auth
-
-- name: gle_auth_write_cmd
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} gle --auth --shell-write-mode commands
-
-- name: heap1_js
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --storageEngine=heap1 --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} js
-
-- name: jsCore
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --shell-write-mode commands --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} jsCore
-
-- name: jsCore_compatibility
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --shell-write-mode compatibility --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} jsCore
-
-- name: jsCore_auth
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --shell-write-mode commands --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --auth jsCore
-
-- name: jsCore_small_oplog
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --shell-write-mode commands --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --small-oplog jsCore
-
-- name: mongo-perf
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- git clone git://github.com/mongodb/mongo-perf.git
- cd mongo-perf/mongo-cxx-driver && ${scons|scons}
- cd mongo-perf && ${scons|scons}
- cd mongo-perf && touch mongod.conf
- if [ "${build_variant|}" = "linux-64-new-query-framework" ] ; then cd mongo-perf && (echo "setParameter = newQueryFrameworkEnabled=1" | tee -a mongod.conf) fi
- cd mongo-perf && ${python|python} runner.py --config mongod.conf --rhost mongoperf-db.10gen.cc --port 30000 --mongod ../mongod --label ${build_variant|}
-
-- name: mmap
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} mmap_v1
-
-- name: mongosTest
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} mongosTest
-
-- name: mongosTest_auth
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --auth mongosTest
-
-- name: multiversion
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- rm -rf /data/install /data/multiversion
- ${python|python} buildscripts/setup_multiversion_mongodb.py /data/install /data/multiversion "Linux/x86_64" "1.8" "2.0" "2.2" "2.4" "2.6"
- PATH=$PATH:/data/multiversion ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} multiVersion
-
-- name: noPassthrough
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} noPassthrough
-
-- name: noPassthroughWithMongod
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} noPassthroughWithMongod
-
-- name: slow1
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} slow1
-
-- name: slow2
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --clean-every=1 --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} slow2
-
-- name: parallel
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} parallel
-
-- name: parallel_compatibility
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --shell-write-mode compatibility --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} parallel
-
-- name: concurrency
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} concurrency
-
-- name: concurrency_compatibility
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --shell-write-mode compatibility --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} concurrency
-
-- name: replicasets
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} replSets
-
-- name: replicasets_auth
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --auth replSets
-
-- name: replication
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} repl
-
-- name: replication_auth
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --auth repl
-
-- name: sasl
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} sasl
-
-- name: sharding
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} sharding
-
-- name: sharding_auth
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --auth sharding
-
-- name: snmp
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- mkdir -p snmpconf
- cp -f src/mongo/db/modules/enterprise/docs/mongod.conf.master snmpconf/mongod.conf
- SNMPCONFPATH=snmpconf ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} snmp
-
-- name: ssl
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} ssl --use-ssl
-
-- name: sslSpecial
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} sslSpecial
-
-- name: tool
- depends_on:
- - name: compile
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "extract binaries"
- - func: "setup credentials"
- - func: "get buildnumber"
- - command: shell.exec
- params:
- working_dir: src
- script: |
- cp mongodb*/bin/* .
- ${python|python} buildscripts/smoke.py --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json ${test_flags|} --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} tool
-
-- name: push
- depends_on:
- - name: "*"
- stepback: false
- commands:
- - func: "fetch artifacts"
- - func: "fetch binaries"
- - func: "fetch debugsymbols archive"
- - command: shell.exec
- params:
- working_dir: src
- silent: true
- script: |
- set -o errexit
- echo "${signing_auth_token}" > signing_auth_token
- - command: shell.exec
- params:
- working_dir: src
- script: |
- set -o errexit
- set -o verbose
- mv mongo-binaries.tgz mongodb-binaries.${ext|tgz}
- mv mongo-debugsymbols.tgz debugsymbols-*.${ext|tgz} || true
- cp mongodb-*.${ext|tgz} mongodb-${push_name}-${push_arch}-latest.${ext|tgz}
- cp debugsymbols-*.${ext|tgz} mongodb-${push_name}-${push_arch}-debugsymbols-latest.${ext|tgz} || true
- /usr/bin/find build/ -type f | grep msi$ | xargs -I original_filename cp original_filename mongodb-win32-${push_arch}-latest.msi || true
-
- notary-client.py --key-name "server-2.8" --auth-token-file ./signing_auth_token --comment "MCI Automatic Signing ${revision} - ${build_variant} - ${branch_name}" --notary-url http://notary-service.build.10gen.cc:5000 --skip-missing mongodb-${push_name}-${push_arch}-latest.${ext|tgz} mongodb-${push_name}-${push_arch}-debugsymbols-latest.${ext|tgz} mongodb-win32-${push_arch}-latest.msi
- rm signing_auth_token
-
- - command: expansions.update
- params:
- file: src/compile_expansions.yml
-
- # Put the binaries tarball/zipfile
- - command: s3.put
- params:
- aws_secret: ${aws_secret}
- local_file: src/mongodb-${push_name}-${push_arch}-latest.${ext|tgz}
- aws_key: ${aws_key}
- bucket: build-push-testing
- permissions: public-read
- content_type: ${content_type|application/x-gzip}
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}
-
- # Put the debug symbols
- - command: s3.put
- params:
- aws_secret: ${aws_secret}
- aws_key: ${aws_key}
- build_variants: ["enterprise-linux-64-amazon-ami", "enterprise-rhel-57-64-bit", "enterprise-rhel-62-64-bit", "enterprise-rhel-70-64-bit", "enterprise-suse11-64", "enterprise-ubuntu1204-64", "enterprise-ubuntu1404-64", "enterprise-debian71-64", "linux-32", "linux-64", "amazon", "rhel55", "rhel62", "rhel70", "suse11", "ubuntu1204", "ubuntu1404", "debian71", "solaris-64-bit"]
- permissions: public-read
- local_file: src/mongodb-${push_name}-${push_arch}-debugsymbols-latest.${ext|tgz}
- bucket: build-push-testing
- content_type: ${content_type|application/x-gzip}
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}
-
- # Put the binaries tarball signature
- - command: s3.put
- params:
- aws_secret: ${aws_secret}
- local_file: src/mongodb-${push_name}-${push_arch}-latest.${ext|tgz}.sig
- aws_key: ${aws_key}
- bucket: build-push-testing
- permissions: public-read
- content_type: ${content_type|application/x-gzip}
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}.sig
-
- # Put the debug symbols signature
- - command: s3.put
- params:
- aws_secret: ${aws_secret}
- aws_key: ${aws_key}
- build_variants: ["enterprise-linux-64-amazon-ami", "enterprise-rhel-57-64-bit", "enterprise-rhel-62-64-bit", "enterprise-rhel-70-64-bit", "enterprise-suse11-64", "enterprise-ubuntu1204-64", "enterprise-ubuntu1404-64", "enterprise-debian71-64", "linux-32", "linux-64", "amazon", "rhel55", "rhel62", "rhel70", "suse11", "ubuntu1204", "ubuntu1404", "debian71", "solaris-64-bit"]
- permissions: public-read
- local_file: src/mongodb-${push_name}-${push_arch}-debugsymbols-latest.${ext|tgz}.sig
- bucket: build-push-testing
- content_type: ${content_type|application/x-gzip}
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}.sig
-
- # Put the signed MSI file
- - command: s3.put
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- permissions: public-read
- build_variants: ["enterprise-windows-64", "windows-64", "windows-64-2k8", "windows-64-2k8-ssl", "windows-32"]
- local_file: src/mongodb-win32-${push_arch}-latest-signed.msi
- bucket: build-push-testing
- content_type: application/x-msi
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-win32-${push_arch}-${suffix}-${task_id}-signed.msi
-
- # Put the binaries tarball sha1
- - command: s3.put
- params:
- aws_secret: ${aws_secret}
- local_file: src/mongodb-${push_name}-${push_arch}-latest.${ext|tgz}.sha1
- aws_key: ${aws_key}
- permissions: public-read
- bucket: build-push-testing
- content_type: text/plain
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}.sha1
-
- # Put the debug symbols sha1
- - command: s3.put
- params:
- aws_secret: ${aws_secret}
- aws_key: ${aws_key}
- build_variants: ["enterprise-linux-64-amazon-ami", "enterprise-rhel-57-64-bit", "enterprise-rhel-62-64-bit", "enterprise-rhel-70-64-bit", "enterprise-suse11-64", "enterprise-ubuntu1204-64", "enterprise-ubuntu1404-64", "enterprise-debian71-64", "linux-32", "linux-64", "amazon", "rhel55", "rhel62", "rhel70", "suse11", "ubuntu1204", "ubuntu1404", "debian71", "solaris-64-bit"]
- permissions: public-read
- local_file: src/mongodb-${push_name}-${push_arch}-debugsymbols-latest.${ext|tgz}.sha1
- bucket: build-push-testing
- content_type: text/plain
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}.sha1
-
- # Push the signed MSI sha1
- - command: s3.put
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- permissions: public-read
- build_variants: ["enterprise-windows-64", "windows-64", "windows-64-2k8", "windows-64-2k8-ssl", "windows-32"]
- local_file: src/mongodb-win32-${push_arch}-latest-signed.msi.sha1
- bucket: build-push-testing
- content_type: text/plain
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-win32-${push_arch}-${suffix}-${task_id}-signed.msi.sha1
-
- # Put the binaries tarball sha256
- - command: s3.put
- params:
- aws_secret: ${aws_secret}
- local_file: src/mongodb-${push_name}-${push_arch}-latest.${ext|tgz}.sha256
- permissions: public-read
- aws_key: ${aws_key}
- bucket: build-push-testing
- content_type: text/plain
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}.sha256
-
- # Put the debug symbols sha256
- - command: s3.put
- params:
- aws_secret: ${aws_secret}
- build_variants: ["enterprise-linux-64-amazon-ami", "enterprise-rhel-57-64-bit", "enterprise-rhel-62-64-bit", "enterprise-rhel-70-64-bit", "enterprise-suse11-64", "enterprise-ubuntu1204-64", "enterprise-ubuntu1404-64", "enterprise-debian71-64", "linux-32", "linux-64", "amazon", "rhel55", "rhel62", "rhel70", "suse11", "ubuntu1204", "ubuntu1404", "debian71", "solaris-64-bit"]
- local_file: src/mongodb-${push_name}-${push_arch}-debugsymbols-latest.${ext|tgz}.sha256
- aws_key: ${aws_key}
- bucket: build-push-testing
- permissions: public-read
- content_type: text/plain
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}.sha256
-
- # Put the signed MSI sha256
- - command: s3.put
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- build_variants: ["enterprise-windows-64", "windows-64", "windows-64-2k8", "windows-64-2k8-ssl", "windows-32"]
- local_file: src/mongodb-win32-${push_arch}-latest-signed.msi.sha256
- bucket: build-push-testing
- permissions: public-read
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-win32-${push_arch}-${suffix}-${task_id}-signed.msi.sha256
- content_type: text/plain
-
- # Put the binaries tarball md5
- - command: s3.put
- params:
- aws_secret: ${aws_secret}
- local_file: src/mongodb-${push_name}-${push_arch}-latest.${ext|tgz}.md5
- aws_key: ${aws_key}
- bucket: build-push-testing
- permissions: public-read
- content_type: text/plain
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}.md5
-
- # Put the debug symbols md5
- - command: s3.put
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- build_variants: ["enterprise-linux-64-amazon-ami", "enterprise-rhel-57-64-bit", "enterprise-rhel-62-64-bit", "enterprise-rhel-70-64-bit", "enterprise-suse11-64", "enterprise-ubuntu1204-64", "enterprise-ubuntu1404-64", "linux-32", "linux-64", "amazon", "rhel55", "rhel62", "rhel70", "suse11", "ubuntu1204", "ubuntu1404", "debian71", "solaris-64-bit"]
- local_file: src/mongodb-${push_name}-${push_arch}-debugsymbols-latest.${ext|tgz}.md5
- bucket: build-push-testing
- content_type: text/plain
- permissions: public-read
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}.md5
-
- # Put the signed MSI md5
- - command: s3.put
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- build_variants: ["enterprise-windows-64", "windows-64", "windows-64-2k8", "windows-64-2k8-ssl", "windows-32"]
- local_file: src/mongodb-win32-${push_arch}-latest-signed.msi.md5
- bucket: build-push-testing
- permissions: public-read
- content_type: text/plain
- remote_file: ${push_path}-STAGE/${push_name}/mongodb-win32-${push_arch}-${suffix}-${task_id}-signed.msi.md5
-
- - command: s3Copy.copy
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- s3_copy_files:
- #Binaries
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}', 'bucket': '${push_bucket}'}}
-
- #Debug Symbols
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}', 'bucket': '${push_bucket}'},
- 'build_variants':
- ['enterprise-linux-64-amazon-ami',
- 'enterprise-rhel-57-64-bit',
- 'enterprise-rhel-62-64-bit',
- 'enterprise-suse11-64',
- 'enterprise-ubuntu1204-64',
- 'enterprise-ubuntu1404-64',
- 'enterprise-debian71-64',
- 'linux-32',
- 'linux-64',
- 'amazon',
- 'rhel55',
- 'rhel62',
- 'rhel70',
- 'suse11',
- 'ubuntu1204',
- 'ubuntu1404',
- 'debian71',
- 'solaris-64-bit']
- }
-
- #MSI (Windows only)
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-win32-${push_arch}-${suffix}-${task_id}-signed.msi', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-win32-${push_arch}-${suffix}-signed.msi', 'bucket': '${push_bucket}'},
- 'build_variants': [ 'enterprise-windows-64', 'windows-64', 'windows-64-2k8', 'windows-64-2k8-ssl', 'windows-32' ] }
-
- #Binaries Signature
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}.sig', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sig', 'bucket': '${push_bucket}'}}
-
- #Debug Symbols Signature
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}.sig', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sig', 'bucket': '${push_bucket}'},
- 'build_variants':
- ['enterprise-linux-64-amazon-ami',
- 'enterprise-rhel-57-64-bit',
- 'enterprise-rhel-62-64-bit',
- 'enterprise-suse11-64',
- 'enterprise-ubuntu1204-64',
- 'enterprise-ubuntu1404-64',
- 'enterprise-debian71-64',
- 'linux-32',
- 'linux-64',
- 'amazon',
- 'rhel55',
- 'rhel62',
- 'rhel70',
- 'suse11',
- 'ubuntu1204',
- 'ubuntu1404',
- 'debian71',
- 'solaris-64-bit']
- }
-
- #SHA1 for binaries
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}.sha1', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha1', 'bucket': '${push_bucket}'}}
-
- #SHA1 for debug symbols
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}.sha1', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha1', 'bucket': '${push_bucket}'},
- 'build_variants': [
- 'enterprise-linux-64-amazon-ami',
- 'enterprise-rhel-57-64-bit',
- 'enterprise-rhel-62-64-bit',
- 'enterprise-suse11-64',
- 'enterprise-ubuntu1204-64',
- 'enterprise-ubuntu1404-64',
- 'enterprise-debian71-64',
- 'linux-32',
- 'linux-64',
- 'amazon',
- 'rhel55',
- 'rhel62',
- 'rhel70',
- 'suse11',
- 'ubuntu1204',
- 'ubuntu1404',
- 'debian71',
- 'solaris-64-bit'
- ]
- }
-
- #SHA1 for MSI
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-win32-${push_arch}-${suffix}-${task_id}-signed.msi.sha1', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-win32-${push_arch}-${suffix}-signed.msi.sha1', 'bucket': '${push_bucket}'},
- 'build_variants': ['enterprise-windows-64', 'windows-64', 'windows-64-2k8', 'windows-64-2k8-ssl', 'windows-32'] }
-
- #SHA256 for binaries
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}.sha256', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha256', 'bucket': '${push_bucket}'}}
-
- #SHA256 for debugsymbols
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}.sha256', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha256', 'bucket': '${push_bucket}'},
- 'build_variants': [
- 'enterprise-linux-64-amazon-ami',
- 'enterprise-rhel-57-64-bit',
- 'enterprise-rhel-62-64-bit',
- 'enterprise-suse11-64',
- 'enterprise-ubuntu1204-64',
- 'enterprise-ubuntu1404-64',
- 'enterprise-debian71-64',
- 'linux-32',
- 'linux-64',
- 'amazon',
- 'rhel55',
- 'rhel62',
- 'rhel70',
- 'suse11',
- 'ubuntu1204',
- 'ubuntu1404',
- 'debian71',
- 'solaris-64-bit'
- ]}
-
- #SHA256 for MSI files
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-win32-${push_arch}-${suffix}-${task_id}-signed.msi.sha256', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-win32-${push_arch}-${suffix}-signed.msi.sha256', 'bucket': '${push_bucket}'},
- 'build_variants': ['enterprise-windows-64', 'windows-64', 'windows-64-2k8', 'windows-64-2k8-ssl', 'windows-32'], }
-
-
- #MD5 for binaries
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}.${ext|tgz}.md5', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.md5', 'bucket': '${push_bucket}'}}
-
- #MD5 for debugsymbols
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}-${task_id}.${ext|tgz}.md5', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.md5', 'bucket': '${push_bucket}'},
- 'build_variants': [
- 'enterprise-linux-64-amazon-ami',
- 'enterprise-rhel-57-64-bit',
- 'enterprise-rhel-62-64-bit',
- 'enterprise-suse11-64',
- 'enterprise-ubuntu1204-64',
- 'enterprise-ubuntu1404-64',
- 'linux-32',
- 'linux-64',
- 'amazon',
- 'rhel55',
- 'rhel62',
- 'rhel70',
- 'suse11',
- 'ubuntu1204',
- 'ubuntu1404',
- 'debian71',
- 'solaris-64-bit']
- }
-
- #MD5 for MSIs
- - {'source': {'path': '${push_path}-STAGE/${push_name}/mongodb-win32-${push_arch}-${suffix}-${task_id}-signed.msi.md5', 'bucket': 'build-push-testing'},
- 'destination': {'path': '${push_path}/mongodb-win32-${push_arch}-${suffix}-signed.msi.md5', 'bucket': '${push_bucket}'},
- 'build_variants': ['enterprise-windows-64', 'windows-64', 'windows-64-2k8', 'windows-64-2k8-ssl', 'windows-32'], }
- - command: shell.exec
- params:
- working_dir: src
- script: |
- ssh distro-deb2.build.10gen.cc sudo -H -u ubuntu /home/ubuntu/git/kernel-tools/releases/publish_packages/publish_packages.sh master ${version} ${revision} || true
-
-### modules ###
-
-modules:
-- name: enterprise
- repo: git@github.com:10gen/mongo-enterprise-modules.git
- prefix: src/mongo/db/modules
- branch: master
-
-- name: bdb
- repo: git@github.com:10gen/bdb-module.git
- prefix: src/mongo/db/modules
- branch: master
-
-#######################################
-# Buildvariants
-#######################################
-
-buildvariants:
-
-###########################################
-# Experimental buildvariants #
-###########################################
-
-- name: ubuntu1404-rocksdb
- display_name: Ubuntu 14.04 64-bit (RocksDB)
- run_on:
- - ubuntu1404-test
- expansions:
- build_rocksdb: true
- compile_flags: --c++11 -j$(grep -c ^processor /proc/cpuinfo) --dbg=off --opt=on --cpppath=$(readlink -f ../rocksdb/include/) --libpath=$(readlink -f ../rocksdb/) --extralib=rocksdb --rocksdb --cc=/opt/mongodbtoolchain/bin/gcc --cxx=/opt/mongodbtoolchain/bin/g++ --variant-dir=release
- test_flags: --continue-on-failure --storageEngine=RocksDB
- tasks:
- - name: compile
- distros:
- - ubuntu1404-build
- - name: lint
- - name: aggregation
- - name: aggregation_auth
- - name: auth
- - name: dbtest
- # - name: disk
- # - name: durability
- - name: failpoints
- - name: failpoints_auth
- - name: gle_auth
- - name: gle_auth_write_cmd
- - name: jsCore
- - name: jsCore_auth
- - name: jsCore_compatibility
- - name: jsCore_small_oplog
- - name: noPassthrough
- - name: noPassthroughWithMongod
- - name: parallel
- - name: parallel_compatibility
- - name: concurrency
- - name: concurrency_compatibility
- - name: replicasets
- - name: replicasets_auth
- - name: replication
- - name: replication_auth
- - name: sharding
- - name: sharding_auth
- - name: slow1
- - name: slow2
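
The YAML above leans heavily on Evergreen's ${key|default} expansions (for example ${ext|tgz} in the checksum paths): the key is looked up in the executor's expansion map, and the text after the pipe is used when the key is unknown. As an illustration only (not Evergreen's implementation; the helper name "expand" is made up), the substitution rule amounts to this C++ sketch:

    // Minimal sketch of the ${key|default} expansion rule used in the
    // configuration above. Unknown keys fall back to the text after '|'.
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <string>

    std::string expand(const std::string& tmpl,
                       const std::map<std::string, std::string>& vars) {
        std::string out;
        for (std::size_t i = 0; i < tmpl.size();) {
            if (tmpl[i] == '$' && i + 1 < tmpl.size() && tmpl[i + 1] == '{') {
                std::size_t close = tmpl.find('}', i + 2);
                if (close != std::string::npos) {
                    std::string body = tmpl.substr(i + 2, close - i - 2);
                    std::size_t bar = body.find('|');
                    std::string key = body.substr(0, bar);  // npos takes the whole body
                    std::string def = bar == std::string::npos ? "" : body.substr(bar + 1);
                    auto it = vars.find(key);
                    out += (it != vars.end()) ? it->second : def;
                    i = close + 1;
                    continue;
                }
            }
            out += tmpl[i++];
        }
        return out;
    }

    int main() {
        std::map<std::string, std::string> vars{{"push_name", "linux"}};
        // "ext" is not set, so the |tgz default applies.
        std::cout << expand("mongodb-${push_name}.${ext|tgz}", vars) << "\n";
    }
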
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index a2b60964384..4df87067780 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -1717,8 +1717,8 @@ modules:
prefix: src/mongo/db/modules
branch: master
-- name: bdb
- repo: git@github.com:10gen/bdb-module.git
+- name: rocksdb
+ repo: git@github.com:mongodb-partners/mongo-rocks.git
prefix: src/mongo/db/modules
branch: master
@@ -3512,6 +3512,54 @@ buildvariants:
distros:
- rhel55-test
+##############################################
+# mongo-partner storage engine buildvariants #
+##############################################
+
+- name: ubuntu1404-rocksdb
+ display_name: Ubuntu 14.04 64-bit (RocksDB)
+ modules:
+ - rocksdb
+ run_on:
+ - ubuntu1404-test
+ expansions:
+ build_rocksdb: true
+ compile_flags: --c++11 -j$(grep -c ^processor /proc/cpuinfo) --dbg=off --opt=on --cpppath=$(readlink -f ../rocksdb/include/) --libpath=$(readlink -f ../rocksdb/) --extralib=rocksdb --cc=/opt/mongodbtoolchain/bin/gcc --cxx=/opt/mongodbtoolchain/bin/g++ --variant-dir=release
+ test_flags: --continue-on-failure --storageEngine=RocksDB
+ tasks:
+ - name: compile
+ distros:
+ - ubuntu1404-build
+ # - name: lint
+ - name: aggregation
+ - name: aggregation_auth
+ - name: auth
+ - name: dbtest
+ # - name: disk
+ # - name: durability
+ - name: failpoints
+ - name: failpoints_auth
+ - name: gle_auth
+ - name: gle_auth_write_cmd
+ - name: jsCore
+ - name: jsCore_auth
+ - name: jsCore_compatibility
+ - name: jsCore_small_oplog
+ - name: noPassthrough
+ - name: noPassthroughWithMongod
+ - name: parallel
+ - name: parallel_compatibility
+ - name: concurrency
+ - name: concurrency_compatibility
+ - name: replicasets
+ - name: replicasets_auth
+ - name: replication
+ - name: replication_auth
+ - name: sharding
+ - name: sharding_auth
+ - name: slow1
+ - name: slow2
+
###########################################
# Experimental buildvariants #
###########################################
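
The new buildvariant keeps the old compile_flags minus the --rocksdb option, picking the engine sources up through the rocksdb module instead; the -j level is still derived by counting "processor" lines in /proc/cpuinfo. A portable C++ analogue of that shell fragment (an illustration only, not part of the build) is std::thread::hardware_concurrency():

    #include <iostream>
    #include <thread>

    int main() {
        // May return 0 when the count cannot be determined, hence the fallback.
        unsigned n = std::thread::hardware_concurrency();
        std::cout << "-j" << (n ? n : 1) << "\n";
    }
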
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index d983cae0f90..89c4bfbbed5 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -42,7 +42,6 @@ env.SConscript(['base/SConscript',
'db/storage/in_memory/SConscript',
'db/storage/kv/SConscript',
'db/storage/mmap_v1/SConscript',
- 'db/storage/rocks/SConscript',
'db/storage/wiredtiger/SConscript',
'db/SConscript',
'installer/msi/SConscript',
@@ -1028,9 +1027,6 @@ serveronlyLibdeps = ["coreshard",
'elapsed_tracker',
'$BUILD_DIR/third_party/shim_snappy']
-if has_option("rocksdb" ):
- serveronlyLibdeps.append( 'db/storage/rocks/storage_rocks' )
-
if wiredtiger:
serveronlyLibdeps.append( 'db/storage/wiredtiger/storage_wiredtiger' )
serveronlyLibdeps.append( '$BUILD_DIR/third_party/shim_wiredtiger')
diff --git a/src/mongo/db/storage/rocks/SConscript b/src/mongo/db/storage/rocks/SConscript
deleted file mode 100644
index 8c43d94c303..00000000000
--- a/src/mongo/db/storage/rocks/SConscript
+++ /dev/null
@@ -1,89 +0,0 @@
-Import("env")
-Import("has_option")
-
-if has_option("rocksdb"):
-
- env.Library(
- target= 'storage_rocks_base',
- source= [
- 'rocks_global_options.cpp',
- 'rocks_engine.cpp',
- 'rocks_record_store.cpp',
- 'rocks_recovery_unit.cpp',
- 'rocks_index.cpp',
- 'rocks_transaction.cpp',
- ],
- LIBDEPS= [
- '$BUILD_DIR/mongo/bson',
- '$BUILD_DIR/mongo/db/catalog/collection_options',
- '$BUILD_DIR/mongo/db/concurrency/write_conflict_exception',
- '$BUILD_DIR/mongo/db/index/index_descriptor',
- '$BUILD_DIR/mongo/db/storage/bson_collection_catalog_entry',
- '$BUILD_DIR/mongo/db/storage/index_entry_comparison',
- '$BUILD_DIR/mongo/db/storage/key_string',
- '$BUILD_DIR/mongo/db/storage/oplog_hack',
- '$BUILD_DIR/mongo/foundation',
- '$BUILD_DIR/mongo/processinfo',
- '$BUILD_DIR/third_party/shim_snappy',
- ],
- SYSLIBDEPS=["rocksdb",
- "z",
-                    "bz2"]  # z and bz2 are dependencies of RocksDB
- )
-
- env.Library(
- target= 'storage_rocks',
- source= [
- 'rocks_init.cpp',
- 'rocks_options_init.cpp',
- 'rocks_record_store_mongod.cpp',
- 'rocks_server_status.cpp',
- ],
- LIBDEPS= [
- 'storage_rocks_base',
- '$BUILD_DIR/mongo/db/storage/kv/kv_engine'
- ]
- )
-
- env.Library(
- target= 'storage_rocks_mock',
- source= [
- 'rocks_record_store_mock.cpp',
- ],
- LIBDEPS= [
- 'storage_rocks_base',
- ]
- )
-
-
- env.CppUnitTest(
- target='storage_rocks_index_test',
- source=['rocks_index_test.cpp'
- ],
- LIBDEPS=[
- 'storage_rocks_mock',
- '$BUILD_DIR/mongo/db/storage/sorted_data_interface_test_harness'
- ]
- )
-
-
- env.CppUnitTest(
- target='storage_rocks_record_store_test',
- source=['rocks_record_store_test.cpp'
- ],
- LIBDEPS=[
- 'storage_rocks_mock',
- '$BUILD_DIR/mongo/db/storage/record_store_test_harness'
- ]
- )
-
- env.CppUnitTest(
- target='storage_rocks_engine_test',
- source=['rocks_engine_test.cpp'
- ],
- LIBDEPS=[
- 'storage_rocks_mock',
- '$BUILD_DIR/mongo/db/storage/kv/kv_engine_test_harness'
- ]
- )
-
diff --git a/src/mongo/db/storage/rocks/rocks_engine.cpp b/src/mongo/db/storage/rocks/rocks_engine.cpp
deleted file mode 100644
index e0955aeb848..00000000000
--- a/src/mongo/db/storage/rocks/rocks_engine.cpp
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/rocks/rocks_engine.h"
-
-#include <boost/filesystem/operations.hpp>
-#include <boost/make_shared.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/scoped_ptr.hpp>
-
-#include <rocksdb/cache.h>
-#include <rocksdb/comparator.h>
-#include <rocksdb/db.h>
-#include <rocksdb/slice.h>
-#include <rocksdb/options.h>
-#include <rocksdb/table.h>
-#include <rocksdb/utilities/convenience.h>
-#include <rocksdb/filter_policy.h>
-#include <rocksdb/utilities/write_batch_with_index.h>
-
-#include "mongo/db/catalog/collection_options.h"
-#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/rocks/rocks_global_options.h"
-#include "mongo/db/storage/rocks/rocks_record_store.h"
-#include "mongo/db/storage/rocks/rocks_recovery_unit.h"
-#include "mongo/db/storage/rocks/rocks_index.h"
-#include "mongo/platform/endian.h"
-#include "mongo/util/log.h"
-#include "mongo/util/processinfo.h"
-
-#define ROCKS_TRACE log()
-
-#define ROCKS_STATUS_OK( s ) if ( !( s ).ok() ) { error() << "rocks error: " << ( s ).ToString(); \
- invariant( false ); }
-
-namespace mongo {
-
- namespace {
- // we encode prefixes in big endian because we want to quickly jump to the max prefix
- // (iter->SeekToLast())
- bool extractPrefix(const rocksdb::Slice& slice, uint32_t* prefix) {
- if (slice.size() < sizeof(uint32_t)) {
- return false;
- }
- *prefix = endian::bigToNative(*reinterpret_cast<const uint32_t*>(slice.data()));
- return true;
- }
-
- std::string encodePrefix(uint32_t prefix) {
- uint32_t bigEndianPrefix = endian::nativeToBig(prefix);
- return std::string(reinterpret_cast<const char*>(&bigEndianPrefix), sizeof(uint32_t));
- }
- } // anonymous namespace
-
- // first four bytes are the default prefix 0
- const std::string RocksEngine::kMetadataPrefix("\0\0\0\0metadata-", 12);
-
- RocksEngine::RocksEngine(const std::string& path, bool durable)
- : _path(path), _durable(durable) {
- { // create block cache
- uint64_t cacheSizeGB = rocksGlobalOptions.cacheSizeGB;
- if (cacheSizeGB == 0) {
- ProcessInfo pi;
- unsigned long long memSizeMB = pi.getMemSizeMB();
- if (memSizeMB > 0) {
- double cacheMB = memSizeMB / 2;
- cacheSizeGB = static_cast<uint64_t>(cacheMB / 1024);
- }
- if (cacheSizeGB < 1) {
- cacheSizeGB = 1;
- }
- }
- _block_cache = rocksdb::NewLRUCache(cacheSizeGB * 1024 * 1024 * 1024LL);
- }
- // open DB
- rocksdb::DB* db;
- auto s = rocksdb::DB::Open(_options(), path, &db);
- ROCKS_STATUS_OK(s);
- _db.reset(db);
-
- // open iterator
- boost::scoped_ptr<rocksdb::Iterator> _iter(_db->NewIterator(rocksdb::ReadOptions()));
-
- // find maxPrefix
- _maxPrefix = 0;
- _iter->SeekToLast();
- if (_iter->Valid()) {
- // otherwise the DB is empty, so we just keep it at 0
- bool ok = extractPrefix(_iter->key(), &_maxPrefix);
- // this is DB corruption here
- invariant(ok);
- }
-
- // load ident to prefix map
- {
- boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
- for (_iter->Seek(kMetadataPrefix);
- _iter->Valid() && _iter->key().starts_with(kMetadataPrefix); _iter->Next()) {
- rocksdb::Slice ident(_iter->key());
- ident.remove_prefix(kMetadataPrefix.size());
- // this could throw DBException, which then means DB corruption. We just let it fly
- // to the caller
- BSONObj identConfig(_iter->value().data());
- BSONElement element = identConfig.getField("prefix");
- // TODO: SERVER-16979 Correctly handle errors returned by RocksDB
- // This is DB corruption
-                invariant(!element.eoo() && element.isNumber());
- uint32_t identPrefix = static_cast<uint32_t>(element.numberInt());
- _identPrefixMap[StringData(ident.data(), ident.size())] = identPrefix;
- }
- }
- }
-
- RocksEngine::~RocksEngine() {}
-
- RecoveryUnit* RocksEngine::newRecoveryUnit() {
- return new RocksRecoveryUnit(&_transactionEngine, _db.get(), _durable);
- }
-
- Status RocksEngine::createRecordStore(OperationContext* opCtx, StringData ns, StringData ident,
- const CollectionOptions& options) {
- return _createIdentPrefix(ident);
- }
-
- RecordStore* RocksEngine::getRecordStore(OperationContext* opCtx, StringData ns,
- StringData ident, const CollectionOptions& options) {
- if (options.capped) {
- return new RocksRecordStore(
- ns, ident, _db.get(), _getIdentPrefix(ident), true,
- options.cappedSize ? options.cappedSize : 4096, // default size
- options.cappedMaxDocs ? options.cappedMaxDocs : -1);
- } else {
- return new RocksRecordStore(ns, ident, _db.get(), _getIdentPrefix(ident));
- }
- }
-
- Status RocksEngine::createSortedDataInterface(OperationContext* opCtx, StringData ident,
- const IndexDescriptor* desc) {
- return _createIdentPrefix(ident);
- }
-
- SortedDataInterface* RocksEngine::getSortedDataInterface(OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc) {
- if (desc->unique()) {
- return new RocksUniqueIndex(_db.get(), _getIdentPrefix(ident), ident.toString(),
- Ordering::make(desc->keyPattern()));
- } else {
- return new RocksStandardIndex(_db.get(), _getIdentPrefix(ident), ident.toString(),
- Ordering::make(desc->keyPattern()));
- }
- }
-
- Status RocksEngine::dropIdent(OperationContext* opCtx, StringData ident) {
- // TODO optimize this using CompactionFilterV2
- rocksdb::WriteBatch wb;
- wb.Delete(kMetadataPrefix + ident.toString());
-
- std::string prefix = _getIdentPrefix(ident);
- rocksdb::Slice prefixSlice(prefix.data(), prefix.size());
-
- boost::scoped_ptr<rocksdb::Iterator> _iter(_db->NewIterator(rocksdb::ReadOptions()));
- for (_iter->Seek(prefixSlice); _iter->Valid() && _iter->key().starts_with(prefixSlice);
- _iter->Next()) {
- ROCKS_STATUS_OK(_iter->status());
- wb.Delete(_iter->key());
- }
- auto s = _db->Write(rocksdb::WriteOptions(), &wb);
- if (!s.ok()) {
- return toMongoStatus(s);
- }
-
- {
- boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
- _identPrefixMap.erase(ident);
- }
-
- return Status::OK();
- }
-
- bool RocksEngine::hasIdent(OperationContext* opCtx, StringData ident) const {
- boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
- return _identPrefixMap.find(ident) != _identPrefixMap.end();
- }
-
-    std::vector<std::string> RocksEngine::getAllIdents(OperationContext* opCtx) const {
-        std::vector<std::string> idents;
-        // The map is read here too, so take the same lock as hasIdent() does.
-        boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
-        for (auto& entry : _identPrefixMap) {
-            idents.push_back(entry.first);
-        }
-        return idents;
-    }
-
- // non public api
- Status RocksEngine::_createIdentPrefix(StringData ident) {
- uint32_t prefix = 0;
- {
- boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
- if (_identPrefixMap.find(ident) != _identPrefixMap.end()) {
- // already exists
- return Status::OK();
- }
-
- prefix = ++_maxPrefix;
- _identPrefixMap[ident] = prefix;
- }
-
- BSONObjBuilder builder;
- builder.append("prefix", static_cast<int32_t>(prefix));
- BSONObj config = builder.obj();
-
- auto s = _db->Put(rocksdb::WriteOptions(), kMetadataPrefix + ident.toString(),
- rocksdb::Slice(config.objdata(), config.objsize()));
-
- return toMongoStatus(s);
- }
-
- std::string RocksEngine::_getIdentPrefix(StringData ident) {
- boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
- auto prefixIter = _identPrefixMap.find(ident);
- invariant(prefixIter != _identPrefixMap.end());
- return encodePrefix(prefixIter->second);
- }
-
- rocksdb::Options RocksEngine::_options() const {
- // default options
- rocksdb::Options options;
- rocksdb::BlockBasedTableOptions table_options;
- table_options.block_cache = _block_cache;
- table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
- table_options.format_version = 2;
- options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_options));
-
- options.write_buffer_size = 128 * 1024 * 1024; // 128MB
- options.max_write_buffer_number = 4;
- options.max_background_compactions = 8;
- options.max_background_flushes = 4;
- options.target_file_size_base = 64 * 1024 * 1024; // 64MB
- options.soft_rate_limit = 2;
- options.hard_rate_limit = 3;
- options.max_bytes_for_level_base = 512 * 1024 * 1024; // 512 MB
- options.max_open_files = 20000;
-
- if (rocksGlobalOptions.compression == "snappy") {
- options.compression = rocksdb::kSnappyCompression;
- } else if (rocksGlobalOptions.compression == "zlib") {
- options.compression = rocksdb::kZlibCompression;
- } else if (rocksGlobalOptions.compression == "none") {
- options.compression = rocksdb::kNoCompression;
- } else {
- log() << "Unknown compression, will use default (snappy)";
- }
-
- // create the DB if it's not already present
- options.create_if_missing = true;
- options.wal_dir = _path + "/journal";
-
- // allow override
- if (!rocksGlobalOptions.configString.empty()) {
- rocksdb::Options base_options(options);
- rocksdb::GetOptionsFromString(base_options, rocksGlobalOptions.configString, &options);
- }
-
- return options;
- }
-
- Status toMongoStatus( rocksdb::Status s ) {
- if ( s.ok() )
- return Status::OK();
- else
- return Status( ErrorCodes::InternalError, s.ToString() );
- }
-}
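
Everything the engine stores is keyed under a 4-byte ident prefix, encoded big endian so that byte-wise key order matches numeric order; that is what lets the constructor recover _maxPrefix with a single SeekToLast(). A self-contained sketch of the round trip and of the ordering property (it mirrors encodePrefix/extractPrefix above, but uses explicit shifts instead of mongo's endian helpers):

    #include <cassert>
    #include <cstdint>
    #include <string>

    std::string encodePrefix(uint32_t prefix) {
        std::string s(4, '\0');
        s[0] = static_cast<char>(prefix >> 24);  // most significant byte first
        s[1] = static_cast<char>(prefix >> 16);
        s[2] = static_cast<char>(prefix >> 8);
        s[3] = static_cast<char>(prefix);
        return s;
    }

    bool extractPrefix(const std::string& key, uint32_t* prefix) {
        if (key.size() < 4) return false;
        *prefix = (static_cast<uint32_t>(static_cast<unsigned char>(key[0])) << 24) |
                  (static_cast<uint32_t>(static_cast<unsigned char>(key[1])) << 16) |
                  (static_cast<uint32_t>(static_cast<unsigned char>(key[2])) << 8) |
                  static_cast<uint32_t>(static_cast<unsigned char>(key[3]));
        return true;
    }

    int main() {
        uint32_t p = 0;
        assert(extractPrefix(encodePrefix(12345), &p) && p == 12345);  // round trip
        assert(encodePrefix(1) < encodePrefix(256));  // byte order == numeric order
    }
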
diff --git a/src/mongo/db/storage/rocks/rocks_engine.h b/src/mongo/db/storage/rocks/rocks_engine.h
deleted file mode 100644
index 3ee11ef336d..00000000000
--- a/src/mongo/db/storage/rocks/rocks_engine.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <list>
-#include <map>
-#include <string>
-#include <memory>
-
-#include <boost/optional.hpp>
-#include <boost/scoped_ptr.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/thread/mutex.hpp>
-
-#include <rocksdb/cache.h>
-#include <rocksdb/status.h>
-
-#include "mongo/base/disallow_copying.h"
-#include "mongo/bson/ordering.h"
-#include "mongo/db/storage/kv/kv_engine.h"
-#include "mongo/db/storage/rocks/rocks_transaction.h"
-#include "mongo/util/string_map.h"
-
-namespace rocksdb {
- class ColumnFamilyHandle;
- struct ColumnFamilyDescriptor;
- struct ColumnFamilyOptions;
- class DB;
- class Comparator;
- class Iterator;
- struct Options;
- struct ReadOptions;
-}
-
-namespace mongo {
-
- struct CollectionOptions;
-
- class RocksEngine : public KVEngine {
- MONGO_DISALLOW_COPYING( RocksEngine );
- public:
- RocksEngine(const std::string& path, bool durable);
- virtual ~RocksEngine();
-
- virtual RecoveryUnit* newRecoveryUnit() override;
-
- virtual Status createRecordStore(OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options) override;
-
- virtual RecordStore* getRecordStore(OperationContext* opCtx, StringData ns,
- StringData ident,
- const CollectionOptions& options) override;
-
- virtual Status createSortedDataInterface(OperationContext* opCtx, StringData ident,
- const IndexDescriptor* desc) override;
-
- virtual SortedDataInterface* getSortedDataInterface(OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc) override;
-
- virtual Status dropIdent(OperationContext* opCtx, StringData ident) override;
-
- virtual bool hasIdent(OperationContext* opCtx, StringData ident) const override;
-
- virtual std::vector<std::string> getAllIdents( OperationContext* opCtx ) const override;
-
- virtual bool supportsDocLocking() const override {
- return true;
- }
-
- virtual bool supportsDirectoryPerDB() const override {
- return false;
- }
-
- virtual bool isDurable() const override { return _durable; }
-
- virtual int64_t getIdentSize(OperationContext* opCtx,
- StringData ident) {
- // TODO: return correct size.
- return 1;
- }
-
- virtual Status repairIdent(OperationContext* opCtx,
- StringData ident) {
- return Status::OK();
- }
-
- virtual void cleanShutdown() {}
-
- /**
- * Initializes a background job to remove excess documents in the oplog collections.
- * This applies to the capped collections in the local.oplog.* namespaces (specifically
- * local.oplog.rs for replica sets and local.oplog.$main for master/slave replication).
- * Returns true if a background job is running for the namespace.
- */
- static bool initRsOplogBackgroundThread(StringData ns);
-
- // rocks specific api
-
- rocksdb::DB* getDB() { return _db.get(); }
- const rocksdb::DB* getDB() const { return _db.get(); }
- size_t getBlockCacheUsage() const { return _block_cache->GetUsage(); }
-
- private:
- Status _createIdentPrefix(StringData ident);
- std::string _getIdentPrefix(StringData ident);
-
- rocksdb::Options _options() const;
-
- std::string _path;
- boost::scoped_ptr<rocksdb::DB> _db;
- std::shared_ptr<rocksdb::Cache> _block_cache;
-
- const bool _durable;
-
- // ident prefix map stores mapping from ident to a prefix (uint32_t)
- mutable boost::mutex _identPrefixMapMutex;
- typedef StringMap<uint32_t> IdentPrefixMap;
- IdentPrefixMap _identPrefixMap;
-
- // protected by _identPrefixMapMutex
- uint32_t _maxPrefix;
-
- // This is for concurrency control
- RocksTransactionEngine _transactionEngine;
-
- static const std::string kMetadataPrefix;
- };
-
- Status toMongoStatus( rocksdb::Status s );
-}
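
The _identPrefixMap declared above is a mutex-guarded map from ident to a monotonically increasing uint32_t prefix. A simplified, standard-library-only sketch of that registry, with std::mutex and std::map standing in for boost::mutex and mongo's StringMap, the class name PrefixRegistry made up, and persistence under kMetadataPrefix omitted:

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <mutex>
    #include <string>

    class PrefixRegistry {
    public:
        // Returns the existing prefix for ident, or allocates the next one.
        uint32_t createOrGet(const std::string& ident) {
            std::lock_guard<std::mutex> lk(_mutex);
            auto it = _map.find(ident);
            if (it != _map.end()) return it->second;
            uint32_t p = ++_maxPrefix;
            _map[ident] = p;
            return p;
        }

    private:
        std::mutex _mutex;
        std::map<std::string, uint32_t> _map;
        uint32_t _maxPrefix = 0;  // the real engine recovers this with SeekToLast()
    };

    int main() {
        PrefixRegistry reg;
        std::cout << reg.createOrGet("collection-0") << "\n";  // 1
        std::cout << reg.createOrGet("index-1") << "\n";       // 2
        std::cout << reg.createOrGet("collection-0") << "\n";  // 1 again
    }
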
diff --git a/src/mongo/db/storage/rocks/rocks_engine_test.cpp b/src/mongo/db/storage/rocks/rocks_engine_test.cpp
deleted file mode 100644
index 31f5322b9ab..00000000000
--- a/src/mongo/db/storage/rocks/rocks_engine_test.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include <boost/filesystem/operations.hpp>
-#include <boost/scoped_ptr.hpp>
-
-#include <rocksdb/comparator.h>
-#include <rocksdb/db.h>
-#include <rocksdb/options.h>
-#include <rocksdb/slice.h>
-
-#include "mongo/db/storage/kv/kv_engine.h"
-#include "mongo/db/storage/kv/kv_engine_test_harness.h"
-#include "mongo/db/storage/rocks/rocks_engine.h"
-#include "mongo/unittest/temp_dir.h"
-
-namespace mongo {
- class RocksEngineHarnessHelper : public KVHarnessHelper {
- public:
- RocksEngineHarnessHelper() : _dbpath("mongo-rocks-engine-test") {
- boost::filesystem::remove_all(_dbpath.path());
- restartEngine();
- }
-
- virtual ~RocksEngineHarnessHelper() = default;
-
- virtual KVEngine* getEngine() { return _engine.get(); }
-
- virtual KVEngine* restartEngine() {
- _engine.reset(nullptr);
- _engine.reset(new RocksEngine(_dbpath.path(), true));
- return _engine.get();
- }
-
- private:
- unittest::TempDir _dbpath;
-
- boost::scoped_ptr<RocksEngine> _engine;
- };
-
- KVHarnessHelper* KVHarnessHelper::create() { return new RocksEngineHarnessHelper(); }
-}
diff --git a/src/mongo/db/storage/rocks/rocks_global_options.cpp b/src/mongo/db/storage/rocks/rocks_global_options.cpp
deleted file mode 100644
index fa84cc17f40..00000000000
--- a/src/mongo/db/storage/rocks/rocks_global_options.cpp
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/base/status.h"
-#include "mongo/db/storage/rocks/rocks_global_options.h"
-#include "mongo/util/log.h"
-#include "mongo/util/options_parser/constraints.h"
-
-namespace mongo {
-
- RocksGlobalOptions rocksGlobalOptions;
-
- Status RocksGlobalOptions::add(moe::OptionSection* options) {
- moe::OptionSection rocksOptions("RocksDB options");
-
- rocksOptions.addOptionChaining("storage.rocksdb.cacheSizeGB", "rocksdbCacheSizeGB",
- moe::Int,
- "maximum amount of memory to allocate for cache; "
- "defaults to 1/2 of physical RAM").validRange(1, 10000);
- rocksOptions.addOptionChaining("storage.rocksdb.compression", "rocksdbCompression",
- moe::String,
- "block compression algorithm for collection data "
- "[none|snappy|zlib]")
- .format("(:?none)|(:?snappy)|(:?zlib)", "(none/snappy/zlib)")
- .setDefault(moe::Value(std::string("snappy")));
- rocksOptions.addOptionChaining("storage.rocksdb.configString", "rocksdbConfigString",
- moe::String,
- "RocksDB storage engine custom "
- "configuration settings").hidden();
-
- return options->addSection(rocksOptions);
- }
-
- Status RocksGlobalOptions::store(const moe::Environment& params,
- const std::vector<std::string>& args) {
- if (params.count("storage.rocksdb.cacheSizeGB")) {
- rocksGlobalOptions.cacheSizeGB = params["storage.rocksdb.cacheSizeGB"].as<int>();
- log() << "Block Cache Size GB: " << rocksGlobalOptions.cacheSizeGB;
- }
- if (params.count("storage.rocksdb.compression")) {
- rocksGlobalOptions.compression =
- params["storage.rocksdb.compression"].as<std::string>();
- log() << "Compression: " << rocksGlobalOptions.compression;
- }
- if (params.count("storage.rocksdb.configString")) {
- rocksGlobalOptions.configString =
- params["storage.rocksdb.configString"].as<std::string>();
- log() << "Engine custom option: " << rocksGlobalOptions.configString;
- }
-
- return Status::OK();
- }
-
-} // namespace mongo
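
These options feed the compression selection in _options() (see rocks_engine.cpp above), where an unrecognized string falls back to snappy with a warning. A minimal sketch of that mapping; the enum and function names here are made up, not RocksDB's types:

    #include <iostream>
    #include <string>

    enum class Compression { kNone, kSnappy, kZlib };

    Compression parseCompression(const std::string& s) {
        if (s == "none") return Compression::kNone;
        if (s == "zlib") return Compression::kZlib;
        if (s != "snappy")
            std::clog << "Unknown compression '" << s << "', will use default (snappy)\n";
        return Compression::kSnappy;
    }

    int main() {
        Compression c = parseCompression("lz4");  // warns, returns the default
        std::cout << (c == Compression::kSnappy) << "\n";  // 1
    }
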
diff --git a/src/mongo/db/storage/rocks/rocks_global_options.h b/src/mongo/db/storage/rocks/rocks_global_options.h
deleted file mode 100644
index 9a9e1b28d1f..00000000000
--- a/src/mongo/db/storage/rocks/rocks_global_options.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/util/options_parser/startup_option_init.h"
-#include "mongo/util/options_parser/startup_options.h"
-
-namespace mongo {
-
- namespace moe = mongo::optionenvironment;
-
- class RocksGlobalOptions {
- public:
- RocksGlobalOptions() : cacheSizeGB(0) {}
-
- Status add(moe::OptionSection* options);
- Status store(const moe::Environment& params, const std::vector<std::string>& args);
-
- size_t cacheSizeGB;
-
- std::string compression;
- std::string configString;
- };
-
- extern RocksGlobalOptions rocksGlobalOptions;
-}
diff --git a/src/mongo/db/storage/rocks/rocks_index.cpp b/src/mongo/db/storage/rocks/rocks_index.cpp
deleted file mode 100644
index 9baf286d367..00000000000
--- a/src/mongo/db/storage/rocks/rocks_index.cpp
+++ /dev/null
@@ -1,741 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/rocks/rocks_index.h"
-
-#include <boost/scoped_ptr.hpp>
-#include <boost/shared_ptr.hpp>
-#include <cstdlib>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#include <rocksdb/db.h>
-#include <rocksdb/iterator.h>
-#include <rocksdb/utilities/write_batch_with_index.h>
-
-#include "mongo/base/checked_cast.h"
-#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/storage/index_entry_comparison.h"
-#include "mongo/db/storage/rocks/rocks_engine.h"
-#include "mongo/db/storage/rocks/rocks_record_store.h"
-#include "mongo/db/storage/rocks/rocks_recovery_unit.h"
-#include "mongo/db/storage/rocks/rocks_util.h"
-#include "mongo/util/log.h"
-#include "mongo/util/mongoutils/str.h"
-
-namespace mongo {
-
- using boost::scoped_ptr;
- using boost::shared_ptr;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- namespace {
-
- /**
- * Strips the field names from a BSON object
- */
- BSONObj stripFieldNames( const BSONObj& obj ) {
- BSONObjBuilder b;
- BSONObjIterator i( obj );
- while ( i.more() ) {
- BSONElement e = i.next();
- b.appendAs( e, "" );
- }
- return b.obj();
- }
-
- string dupKeyError(const BSONObj& key) {
- stringstream ss;
- ss << "E11000 duplicate key error ";
- ss << "dup key: " << key.toString();
- return ss.str();
- }
-
- const int kTempKeyMaxSize = 1024; // Do the same as the heap implementation
-
- Status checkKeySize(const BSONObj& key) {
- if (key.objsize() >= kTempKeyMaxSize) {
-                string msg = mongoutils::str::stream()
-                    << "RocksIndex::insert: key too large to index, failing "
-                    << key.objsize() << ' ' << key;
- return Status(ErrorCodes::KeyTooLong, msg);
- }
- return Status::OK();
- }
-
- /**
- * Functionality shared by both unique and standard index
- */
- class RocksCursorBase : public SortedDataInterface::Cursor {
- public:
- RocksCursorBase(OperationContext* txn, rocksdb::DB* db, std::string prefix,
- bool forward, Ordering order)
- : _db(db),
- _prefix(prefix),
- _forward(forward),
- _order(order),
- _locateCacheValid(false) {
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- _iterator.reset(ru->NewIterator(_prefix));
- _currentSequenceNumber = ru->snapshot()->GetSequenceNumber();
- checkStatus();
- }
-
- int getDirection() const { return _forward ? 1 : -1; }
- bool isEOF() const { return _iterator.get() == nullptr || !_iterator->Valid(); }
-
- /**
- * Will only be called with other from same index as this.
- * All EOF locs should be considered equal.
- */
- bool pointsToSamePlaceAs(const Cursor& genOther) const {
- const RocksCursorBase& other = checked_cast<const RocksCursorBase&>(genOther);
-
- if (isEOF() && other.isEOF()) {
- return true;
- } else if (isEOF() || other.isEOF()) {
- return false;
- }
-
- if (_iterator->key() != other._iterator->key()) {
- return false;
- }
-
- // even if keys are equal, record IDs might be different (for unique indexes, since
- // in non-unique indexes RecordID is already encoded in the key)
- return _loc == other._loc;
- }
-
- virtual void advance() {
- // Advance on a cursor at the end is a no-op
- if (isEOF()) {
- return;
- }
- advanceCursor();
- updatePosition();
- }
-
- bool locate(const BSONObj& key, const RecordId& loc) {
- const BSONObj finalKey = stripFieldNames(key);
-
-                if (_locateCacheValid && finalKey == _locateCacheKey &&
-                    loc == _locateCacheRecordId) {
- // exact same call to locate()
- return _locateCacheResult;
- }
-
- fillQuery(finalKey, loc, &_query);
- bool result = _locate(_query, loc);
- updatePosition();
- // An explicit search at the start of the range should always return false
- if (loc == RecordId::min() || loc == RecordId::max()) {
- result = false;
- }
-
- {
- // memoization
- _locateCacheKey = finalKey.getOwned();
- _locateCacheRecordId = loc;
- _locateCacheResult = result;
- _locateCacheValid = true;
- }
- return result;
- }
-
- // same first five args as IndexEntryComparison::makeQueryObject (which is commented).
- void advanceTo(const BSONObj &keyBegin,
- int keyBeginLen,
- bool afterKey,
- const vector<const BSONElement*>& keyEnd,
- const vector<bool>& keyEndInclusive) {
- // make a key representing the location to which we want to advance.
- BSONObj key = IndexEntryComparison::makeQueryObject(
- keyBegin,
- keyBeginLen,
- afterKey,
- keyEnd,
- keyEndInclusive,
- getDirection() );
-
- fillQuery(key, RecordId(), &_query);
- _locate(_query, RecordId());
- updatePosition();
- }
-
- /**
- * Locate a key with fields comprised of a combination of keyBegin fields and keyEnd
- * fields. Also same first five args as IndexEntryComparison::makeQueryObject (which is
- * commented).
- */
- void customLocate(const BSONObj& keyBegin,
- int keyBeginLen,
- bool afterVersion,
- const vector<const BSONElement*>& keyEnd,
- const vector<bool>& keyEndInclusive) {
- advanceTo( keyBegin, keyBeginLen, afterVersion, keyEnd, keyEndInclusive );
- }
-
- BSONObj getKey() const {
- if (isEOF()) {
- return BSONObj();
- }
-
- if (!_keyBsonCache.isEmpty()) {
- return _keyBsonCache;
- }
-
- _keyBsonCache =
- KeyString::toBson(_key.getBuffer(), _key.getSize(), _order, _typeBits);
-
- return _keyBsonCache;
- }
-
- RecordId getRecordId() const { return _loc; }
-
- void savePosition() {
- _savedEOF = isEOF();
- }
-
- void restorePosition(OperationContext* txn) {
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- if (_currentSequenceNumber != ru->snapshot()->GetSequenceNumber()) {
- _iterator.reset(ru->NewIterator(_prefix));
- _currentSequenceNumber = ru->snapshot()->GetSequenceNumber();
-
- if (!_savedEOF) {
- _locate(_key, _loc);
- updatePosition();
- }
- }
- }
-
- protected:
- // Uses _key for the key. Implemented by unique and standard index
- virtual bool _locate(const KeyString& query, RecordId loc) = 0;
-
- virtual void fillQuery(const BSONObj& key, RecordId loc, KeyString* query) const = 0;
-
- // Called after _key has been filled in. Must not throw WriteConflictException.
- virtual void updateLocAndTypeBits() = 0;
-
-
- void advanceCursor() {
- if (_forward) {
- _iterator->Next();
- } else {
- _iterator->Prev();
- }
- }
-
- // Seeks to query. Returns true on exact match.
- bool seekCursor(const KeyString& query) {
- const rocksdb::Slice keySlice(query.getBuffer(), query.getSize());
- _iterator->Seek(keySlice);
- checkStatus();
- if (!_iterator->Valid()) {
- if (!_forward) {
- // this will give lower bound behavior for backwards
- _iterator->SeekToLast();
- checkStatus();
- }
- return false;
- }
-
- if (_iterator->key() == keySlice) {
- return true;
- }
-
- if (!_forward) {
-                    // If we can't find the exact result going backwards, we need to call
-                    // Prev() so that we're at the first value less than (to the left of) what
-                    // we were searching for, rather than the first value greater than (to the
-                    // right of) the value we were searching for.
- _iterator->Prev();
- }
-
- return false;
- }
-
- void updatePosition() {
- if (isEOF()) {
- _loc = RecordId();
- return;
- }
-
- checkStatus();
-
- auto key = _iterator->key();
- _key.resetFromBuffer(key.data(), key.size());
- _keyBsonCache = BSONObj(); // Invalidate cached BSONObj.
-
- _locateCacheValid = false; // Invalidate locate cache
- _locateCacheKey = BSONObj(); // Invalidate locate cache
-
- updateLocAndTypeBits();
- }
-
- void checkStatus() {
- if ( !_iterator->status().ok() ) {
- log() << _iterator->status().ToString();
- // TODO: SERVER-16979 Correctly handle errors returned by RocksDB
- invariant( false );
- }
- }
-
- rocksdb::DB* _db; // not owned
- std::string _prefix;
- boost::scoped_ptr<rocksdb::Iterator> _iterator;
- const bool _forward;
- Ordering _order;
-
- // These are for storing savePosition/restorePosition state
- bool _savedEOF;
- RecordId _savedRecordId;
- rocksdb::SequenceNumber _currentSequenceNumber;
-
- KeyString _key;
- KeyString::TypeBits _typeBits;
- RecordId _loc;
-            mutable BSONObj _keyBsonCache;  // if isEmpty, the cache is invalid and must be
-                                            // reloaded from _key
-
- KeyString _query;
-
- // These are for caching repeated calls to locate()
- bool _locateCacheValid;
- BSONObj _locateCacheKey;
- RecordId _locateCacheRecordId;
- bool _locateCacheResult;
- };
-
- class RocksStandardCursor : public RocksCursorBase {
- public:
- RocksStandardCursor(OperationContext* txn, rocksdb::DB* db, std::string prefix,
- bool forward, Ordering order)
- : RocksCursorBase(txn, db, prefix, forward, order) {}
-
- virtual void fillQuery(const BSONObj& key, RecordId loc, KeyString* query) const {
- // Null cursors should start at the zero key to maintain search ordering in the
- // collator.
- // Reverse cursors should start on the last matching key.
- if (loc.isNull()) {
- loc = _forward ? RecordId::min() : RecordId::max();
- }
-
- query->resetToKey(key, _order, loc);
- }
-
- virtual bool _locate(const KeyString& query, RecordId loc) {
- // loc already encoded in _key
- return seekCursor(query);
- }
-
- virtual void updateLocAndTypeBits() {
- _loc = KeyString::decodeRecordIdAtEnd(_key.getBuffer(), _key.getSize());
- auto value = _iterator->value();
- BufReader br(value.data(), value.size());
- _typeBits.resetFromBuffer(&br);
- }
- };
-
- class RocksUniqueCursor : public RocksCursorBase {
- public:
- RocksUniqueCursor(OperationContext* txn, rocksdb::DB* db, std::string prefix,
- bool forward, Ordering order)
- : RocksCursorBase(txn, db, prefix, forward, order) {}
-
- virtual void fillQuery(const BSONObj& key, RecordId loc, KeyString* query) const {
- query->resetToKey(key, _order); // loc doesn't go in _query for unique indexes
- }
-
- virtual bool _locate(const KeyString& query, RecordId loc) {
- if (!seekCursor(query)) {
- // If didn't seek to exact key, start at beginning of wherever we ended up.
- return false;
- }
- dassert(!isEOF());
-
- // If we get here we need to look at the actual RecordId for this key and make sure
- // we are supposed to see it.
-
- auto value = _iterator->value();
- BufReader br(value.data(), value.size());
- RecordId locInIndex = KeyString::decodeRecordId(&br);
-
- if ((_forward && (locInIndex < loc)) || (!_forward && (locInIndex > loc))) {
- advanceCursor();
- }
-
- return true;
- }
-
- void updateLocAndTypeBits() {
-                // We assume that cursors can only ever see unique indexes in their "pristine"
-                // state, where no duplicates are possible. The cases where dups are allowed
-                // should hold sufficient locks to ensure that no cursor ever sees them.
-
- auto value = _iterator->value();
- BufReader br(value.data(), value.size());
- _loc = KeyString::decodeRecordId(&br);
- _typeBits.resetFromBuffer(&br);
-
- if (!br.atEof()) {
- severe() << "Unique index cursor seeing multiple records for key " << getKey();
- fassertFailed(28609);
- }
- }
- };
-
- // TODO optimize and create two implementations -- one for unique and one for standard index
- class RocksIndexBulkBuilder : public SortedDataBuilderInterface {
- public:
- RocksIndexBulkBuilder(RocksIndexBase* index, OperationContext* txn, bool dupsAllowed)
- : _index(index), _txn(txn), _dupsAllowed(dupsAllowed) {
- invariant(index->isEmpty(txn));
- }
-
- Status addKey(const BSONObj& key, const RecordId& loc) {
- return _index->insert(_txn, key, loc, _dupsAllowed);
- }
-
- void commit(bool mayInterrupt) {
- WriteUnitOfWork uow(_txn);
- uow.commit();
- }
-
- private:
- RocksIndexBase* _index;
- OperationContext* _txn;
- bool _dupsAllowed;
- };
-
- } // namespace
-
- /// RocksIndexBase
-
- RocksIndexBase::RocksIndexBase(rocksdb::DB* db, std::string prefix, std::string ident,
- Ordering order)
- : _db(db), _prefix(prefix), _ident(std::move(ident)), _order(order) {}
-
- SortedDataBuilderInterface* RocksIndexBase::getBulkBuilder(OperationContext* txn,
- bool dupsAllowed) {
- return new RocksIndexBulkBuilder(this, txn, dupsAllowed);
- }
-
- void RocksIndexBase::fullValidate(OperationContext* txn, bool full, long long* numKeysOut,
- BSONObjBuilder* output) const {
- if (numKeysOut) {
- boost::scoped_ptr<SortedDataInterface::Cursor> cursor(newCursor(txn, 1));
- cursor->locate(minKey, RecordId::min());
- *numKeysOut = 0;
- while (!cursor->isEOF()) {
- cursor->advance();
- (*numKeysOut)++;
- }
- }
- }
-
- bool RocksIndexBase::isEmpty(OperationContext* txn) {
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- boost::scoped_ptr<rocksdb::Iterator> it(ru->NewIterator(_prefix));
-
- it->SeekToFirst();
- return !it->Valid();
- }
-
- Status RocksIndexBase::initAsEmpty(OperationContext* txn) {
- // no-op
- return Status::OK();
- }
-
- long long RocksIndexBase::getSpaceUsedBytes(OperationContext* txn) const {
- uint64_t storageSize;
-        std::string nextPrefix = rocksGetNextPrefix(_prefix);
- rocksdb::Range wholeRange(_prefix, nextPrefix);
- _db->GetApproximateSizes(&wholeRange, 1, &storageSize);
- // There might be some bytes in the WAL that we don't count here. Some
- // tests depend on the fact that non-empty indexes have non-zero sizes
- return static_cast<long long>(
- std::max(storageSize, static_cast<uint64_t>(1)));
- }
-
- std::string RocksIndexBase::_makePrefixedKey(const std::string& prefix,
- const KeyString& encodedKey) {
- std::string key(prefix);
- key.append(encodedKey.getBuffer(), encodedKey.getSize());
- return key;
- }
-
- /// RocksUniqueIndex
-
- RocksUniqueIndex::RocksUniqueIndex(rocksdb::DB* db, std::string prefix, std::string ident,
- Ordering order)
- : RocksIndexBase(db, prefix, ident, order) {}
-
- Status RocksUniqueIndex::insert(OperationContext* txn, const BSONObj& key, const RecordId& loc,
- bool dupsAllowed) {
- Status s = checkKeySize(key);
- if (!s.isOK()) {
- return s;
- }
-
- KeyString encodedKey(key, _order);
- std::string prefixedKey(_makePrefixedKey(_prefix, encodedKey));
-
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- if (!ru->transaction()->registerWrite(prefixedKey)) {
- throw WriteConflictException();
- }
-
- std::string currentValue;
- auto getStatus = ru->Get(prefixedKey, &currentValue);
- if (!getStatus.ok() && !getStatus.IsNotFound()) {
- // This means that Get() returned an error
- // TODO: SERVER-16979 Correctly handle errors returned by RocksDB
- invariant(false);
- } else if (getStatus.IsNotFound()) {
- // nothing here. just insert the value
- KeyString value(loc);
- if (!encodedKey.getTypeBits().isAllZeros()) {
- value.appendTypeBits(encodedKey.getTypeBits());
- }
- rocksdb::Slice valueSlice(value.getBuffer(), value.getSize());
- ru->writeBatch()->Put(prefixedKey, valueSlice);
- return Status::OK();
- }
-
- // we are in a weird state where there might be multiple values for a key
- // we put them all in the "list"
- // Note that we can't omit AllZeros when there are multiple locs for a value. When we remove
- // down to a single value, it will be cleaned up.
-
- bool insertedLoc = false;
- KeyString valueVector;
- BufReader br(currentValue.data(), currentValue.size());
- while (br.remaining()) {
- RecordId locInIndex = KeyString::decodeRecordId(&br);
- if (loc == locInIndex) {
- return Status::OK(); // already in index
- }
-
- if (!insertedLoc && loc < locInIndex) {
- valueVector.appendRecordId(loc);
- valueVector.appendTypeBits(encodedKey.getTypeBits());
- insertedLoc = true;
- }
-
- // Copy from old to new value
- valueVector.appendRecordId(locInIndex);
- valueVector.appendTypeBits(KeyString::TypeBits::fromBuffer(&br));
- }
-
- if (!dupsAllowed) {
- return Status(ErrorCodes::DuplicateKey, dupKeyError(key));
- }
-
- if (!insertedLoc) {
- // This loc is higher than all currently in the index for this key
- valueVector.appendRecordId(loc);
- valueVector.appendTypeBits(encodedKey.getTypeBits());
- }
-
- rocksdb::Slice valueVectorSlice(valueVector.getBuffer(), valueVector.getSize());
- ru->writeBatch()->Put(prefixedKey, valueVectorSlice);
- return Status::OK();
- }
-
- void RocksUniqueIndex::unindex(OperationContext* txn, const BSONObj& key, const RecordId& loc,
- bool dupsAllowed) {
- KeyString encodedKey(key, _order);
- std::string prefixedKey(_makePrefixedKey(_prefix, encodedKey));
-
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- // We can't let two threads unindex the same key
- if (!ru->transaction()->registerWrite(prefixedKey)) {
- throw WriteConflictException();
- }
-
- if (!dupsAllowed) {
- ru->writeBatch()->Delete(prefixedKey);
- return;
- }
-
- // dups are allowed, so we have to deal with a vector of RecordIds.
- std::string currentValue;
- auto getStatus = ru->Get(prefixedKey, &currentValue);
- if (!getStatus.ok() && !getStatus.IsNotFound()) {
- // This means that Get() returned an error
- // TODO: SERVER-16979 Correctly handle errors returned by RocksDB
- invariant(false);
- } else if (getStatus.IsNotFound()) {
- // nothing here. just return
- return;
- }
-
- bool foundLoc = false;
- std::vector<std::pair<RecordId, KeyString::TypeBits>> records;
-
- BufReader br(currentValue.data(), currentValue.size());
- while (br.remaining()) {
- RecordId locInIndex = KeyString::decodeRecordId(&br);
- KeyString::TypeBits typeBits = KeyString::TypeBits::fromBuffer(&br);
-
- if (loc == locInIndex) {
- if (records.empty() && !br.remaining()) {
- // This is the common case: we are removing the only loc for this key.
- // Remove the whole entry.
- ru->writeBatch()->Delete(prefixedKey);
- return;
- }
-
- foundLoc = true;
- continue;
- }
-
- records.push_back(std::make_pair(locInIndex, typeBits));
- }
-
- if (!foundLoc) {
- warning().stream() << loc << " not found in the index for key " << key;
- return; // nothing to do
- }
-
- // Put other locs for this key back in the index.
- KeyString newValue;
- invariant(!records.empty());
- for (size_t i = 0; i < records.size(); i++) {
- newValue.appendRecordId(records[i].first);
- // When there is only one record, we can omit AllZeros TypeBits. Otherwise they need
- // to be included.
- if (!(records[i].second.isAllZeros() && records.size() == 1)) {
- newValue.appendTypeBits(records[i].second);
- }
- }
-
- rocksdb::Slice newValueSlice(newValue.getBuffer(), newValue.getSize());
- ru->writeBatch()->Put(prefixedKey, newValueSlice);
- }
-
- SortedDataInterface::Cursor* RocksUniqueIndex::newCursor(OperationContext* txn,
- int direction) const {
- invariant( ( direction == 1 || direction == -1 ) && "invalid value for direction" );
- return new RocksUniqueCursor(txn, _db, _prefix, direction == 1, _order);
- }
-
- Status RocksUniqueIndex::dupKeyCheck(OperationContext* txn, const BSONObj& key,
- const RecordId& loc) {
- KeyString encodedKey(key, _order);
- std::string prefixedKey(_makePrefixedKey(_prefix, encodedKey));
-
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- std::string value;
- auto getStatus = ru->Get(prefixedKey, &value);
- if (!getStatus.ok() && !getStatus.IsNotFound()) {
- // This means that Get() returned an error
- // TODO: SERVER-16979 Correctly handle errors returned by RocksDB
- invariant(false);
- } else if (getStatus.IsNotFound()) {
- // not found, not duplicate key
- return Status::OK();
- }
-
- // If the key exists, check if we already have this loc at this key. If so, we don't
- // consider that to be a dup.
- BufReader br(value.data(), value.size());
- while (br.remaining()) {
- if (KeyString::decodeRecordId(&br) == loc) {
- return Status::OK();
- }
-
- KeyString::TypeBits::fromBuffer(&br); // Just calling this to advance reader.
- }
- return Status(ErrorCodes::DuplicateKey, dupKeyError(key));
- }
-
- /// RocksStandardIndex
- RocksStandardIndex::RocksStandardIndex(rocksdb::DB* db, std::string prefix, std::string ident,
- Ordering order)
- : RocksIndexBase(db, prefix, ident, order) {}
-
- Status RocksStandardIndex::insert(OperationContext* txn, const BSONObj& key,
- const RecordId& loc, bool dupsAllowed) {
- invariant(dupsAllowed);
- Status s = checkKeySize(key);
- if (!s.isOK()) {
- return s;
- }
-
- // If we're inserting an index element, this means we already "locked" the RecordId of the
- // document. No need to register write here
- KeyString encodedKey(key, _order, loc);
- std::string prefixedKey(_makePrefixedKey(_prefix, encodedKey));
-
- rocksdb::Slice value;
- if (!encodedKey.getTypeBits().isAllZeros()) {
- value =
- rocksdb::Slice(reinterpret_cast<const char*>(encodedKey.getTypeBits().getBuffer()),
- encodedKey.getTypeBits().getSize());
- }
-
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- ru->writeBatch()->Put(prefixedKey, value);
-
- return Status::OK();
- }
-
- void RocksStandardIndex::unindex(OperationContext* txn, const BSONObj& key, const RecordId& loc,
- bool dupsAllowed) {
- invariant(dupsAllowed);
- // If we're unindexing an index element, this means we already "locked" the RecordId of the
- // document. No need to register write here
-
- KeyString encodedKey(key, _order, loc);
- std::string prefixedKey(_makePrefixedKey(_prefix, encodedKey));
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- ru->writeBatch()->Delete(prefixedKey);
- }
-
- SortedDataInterface::Cursor* RocksStandardIndex::newCursor(OperationContext* txn,
- int direction) const {
- invariant( ( direction == 1 || direction == -1 ) && "invalid value for direction" );
- return new RocksStandardCursor(txn, _db, _prefix, direction == 1, _order);
- }
-
-
-} // namespace mongo
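
The subtlest part of the file above is the unique-index value format: the value stored for a key is an ordered list of RecordIds (each followed by its KeyString TypeBits), normally of length one, and insert() splices a new id into that list in sorted order. A simplified sketch of just the splice logic, using bare 64-bit ids and omitting the TypeBits bookkeeping:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    using RecordId = int64_t;

    // Returns false if the id was already present (the "already in index" case).
    bool insertLoc(std::vector<RecordId>& value, RecordId loc) {
        auto it = std::lower_bound(value.begin(), value.end(), loc);
        if (it != value.end() && *it == loc) return false;
        value.insert(it, loc);  // keep the list sorted, as the copy loop above does
        return true;
    }

    int main() {
        std::vector<RecordId> value;   // empty value: the common single-id insert
        assert(insertLoc(value, 7));
        assert(insertLoc(value, 3));   // dupsAllowed case: a second id for the key
        assert(!insertLoc(value, 7));  // already in index: no-op
        assert((value == std::vector<RecordId>{3, 7}));
    }
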
diff --git a/src/mongo/db/storage/rocks/rocks_index.h b/src/mongo/db/storage/rocks/rocks_index.h
deleted file mode 100644
index 3c91185aac2..00000000000
--- a/src/mongo/db/storage/rocks/rocks_index.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/db/storage/sorted_data_interface.h"
-
-#include <atomic>
-#include <boost/shared_ptr.hpp>
-#include <string>
-
-#include <rocksdb/db.h>
-
-#include "mongo/bson/ordering.h"
-#include "mongo/db/storage/key_string.h"
-
-#pragma once
-
-namespace rocksdb {
- class DB;
-}
-
-namespace mongo {
-
- class RocksRecoveryUnit;
-
- class RocksIndexBase : public SortedDataInterface {
- MONGO_DISALLOW_COPYING(RocksIndexBase);
-
- public:
- RocksIndexBase(rocksdb::DB* db, std::string prefix, std::string ident, Ordering order);
-
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed);
-
- virtual void fullValidate(OperationContext* txn, bool full, long long* numKeysOut,
- BSONObjBuilder* output) const;
-
- virtual bool appendCustomStats(OperationContext* txn, BSONObjBuilder* output,
- double scale) const {
- // TODO
- return false;
- }
-
- virtual bool isEmpty(OperationContext* txn);
-
- virtual Status initAsEmpty(OperationContext* txn);
-
- virtual long long getSpaceUsedBytes( OperationContext* txn ) const;
-
- protected:
- static std::string _makePrefixedKey(const std::string& prefix, const KeyString& encodedKey);
-
- rocksdb::DB* _db; // not owned
-
- // Each key in the index is prefixed with _prefix
- std::string _prefix;
- std::string _ident;
-
- // used to construct RocksCursors
- const Ordering _order;
- };
-
- class RocksUniqueIndex : public RocksIndexBase {
- public:
- RocksUniqueIndex(rocksdb::DB* db, std::string prefix, std::string ident, Ordering order);
-
- virtual Status insert(OperationContext* txn, const BSONObj& key, const RecordId& loc,
- bool dupsAllowed);
- virtual void unindex(OperationContext* txn, const BSONObj& key, const RecordId& loc,
- bool dupsAllowed);
- virtual SortedDataInterface::Cursor* newCursor(OperationContext* txn, int direction) const;
-
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc);
- };
-
- class RocksStandardIndex : public RocksIndexBase {
- public:
- RocksStandardIndex(rocksdb::DB* db, std::string prefix, std::string ident, Ordering order);
-
- virtual Status insert(OperationContext* txn, const BSONObj& key, const RecordId& loc,
- bool dupsAllowed);
- virtual void unindex(OperationContext* txn, const BSONObj& key, const RecordId& loc,
- bool dupsAllowed);
- virtual SortedDataInterface::Cursor* newCursor(OperationContext* txn, int direction) const;
-
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
- // dupKeyCheck shouldn't be called for non-unique indexes
- invariant(false);
- }
- };
-
-} // namespace mongo
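
All of these index classes store their entries in one shared RocksDB keyspace,
disambiguated by the per-index _prefix (see _makePrefixedKey above). A minimal sketch
of that layout, as a hypothetical standalone helper (the real function takes a
KeyString rather than a std::string):

    #include <string>

    // Concatenate the per-index prefix with the encoded key bytes. Because keys
    // sharing a prefix sort contiguously, an iterator bounded by the prefix only
    // ever sees one index's entries.
    std::string makePrefixedKey(const std::string& prefix, const std::string& encodedKey) {
        std::string key(prefix);
        key.append(encodedKey);
        return key;
    }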
diff --git a/src/mongo/db/storage/rocks/rocks_index_test.cpp b/src/mongo/db/storage/rocks/rocks_index_test.cpp
deleted file mode 100644
index 43effa007b1..00000000000
--- a/src/mongo/db/storage/rocks/rocks_index_test.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include <boost/scoped_ptr.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/filesystem/operations.hpp>
-#include <string>
-
-#include <rocksdb/comparator.h>
-#include <rocksdb/db.h>
-#include <rocksdb/options.h>
-#include <rocksdb/slice.h>
-
-#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/storage/sorted_data_interface_test_harness.h"
-#include "mongo/db/storage/rocks/rocks_engine.h"
-#include "mongo/db/storage/rocks/rocks_index.h"
-#include "mongo/db/storage/rocks/rocks_recovery_unit.h"
-#include "mongo/db/storage/rocks/rocks_transaction.h"
-#include "mongo/unittest/temp_dir.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-
- using boost::scoped_ptr;
- using boost::shared_ptr;
- using std::string;
-
- class RocksIndexHarness : public HarnessHelper {
- public:
- RocksIndexHarness() : _order(Ordering::make(BSONObj())), _tempDir(_testNamespace) {
- boost::filesystem::remove_all(_tempDir.path());
- rocksdb::DB* db;
- rocksdb::Options options;
- options.create_if_missing = true;
- auto s = rocksdb::DB::Open(options, _tempDir.path(), &db);
- ASSERT(s.ok());
- _db.reset(db);
- }
-
- virtual SortedDataInterface* newSortedDataInterface(bool unique) {
- if (unique) {
- return new RocksUniqueIndex(_db.get(), "prefix", "ident", _order);
- } else {
- return new RocksStandardIndex(_db.get(), "prefix", "ident", _order);
- }
- }
-
- virtual RecoveryUnit* newRecoveryUnit() {
- return new RocksRecoveryUnit(&_transactionEngine, _db.get(), true);
- }
-
- private:
- Ordering _order;
- string _testNamespace = "mongo-rocks-sorted-data-test";
- unittest::TempDir _tempDir;
- scoped_ptr<rocksdb::DB> _db;
- RocksTransactionEngine _transactionEngine;
- };
-
- HarnessHelper* newHarnessHelper() { return new RocksIndexHarness(); }
-
- TEST(RocksIndexTest, Isolation) {
- scoped_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
- scoped_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- ASSERT(sorted->isEmpty(opCtx.get()));
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- {
- WriteUnitOfWork uow(opCtx.get());
-
- ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, false));
- ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, false));
-
- uow.commit();
- }
- }
-
- {
- scoped_ptr<OperationContext> t1(harnessHelper->newOperationContext());
- scoped_ptr<OperationContext> t2(harnessHelper->newOperationContext());
-
- scoped_ptr<WriteUnitOfWork> w1(new WriteUnitOfWork(t1.get()));
- scoped_ptr<WriteUnitOfWork> w2(new WriteUnitOfWork(t2.get()));
-
- ASSERT_OK(sorted->insert(t1.get(), key3, loc3, false));
- ASSERT_OK(sorted->insert(t2.get(), key4, loc4, false));
-
- // this should throw
- ASSERT_THROWS(sorted->insert(t2.get(), key3, loc5, false), WriteConflictException);
-
- w1->commit(); // this should succeed
- }
-
- {
- scoped_ptr<OperationContext> t1(harnessHelper->newOperationContext());
- scoped_ptr<OperationContext> t2(harnessHelper->newOperationContext());
-
- scoped_ptr<WriteUnitOfWork> w2(new WriteUnitOfWork(t2.get()));
- // ensure we start w2 transaction
- ASSERT_OK(sorted->insert(t2.get(), key4, loc4, false));
-
- {
- scoped_ptr<WriteUnitOfWork> w1(new WriteUnitOfWork(t1.get()));
-
- {
- WriteUnitOfWork w(t1.get());
- ASSERT_OK(sorted->insert(t1.get(), key5, loc3, false));
- w.commit();
- }
- w1->commit();
- }
-
- // this should throw
- ASSERT_THROWS(sorted->insert(t2.get(), key5, loc3, false), WriteConflictException);
- }
- }
-}
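
The Isolation test above exercises the engine's optimistic conflict detection: before
mutating a key, a writer must register it with its transaction, and registration fails
if another live transaction has already claimed that key. The failure surfaces as a
WriteConflictException, following the pattern used throughout the integration layer
(this fragment mirrors its use in rocks_record_store.cpp):

    RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
    if (!ru->transaction()->registerWrite(key)) {
        throw WriteConflictException();  // caller catches and retries the unit of work
    }
    ru->writeBatch()->Delete(key);       // safe: this transaction now owns the key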
diff --git a/src/mongo/db/storage/rocks/rocks_init.cpp b/src/mongo/db/storage/rocks/rocks_init.cpp
deleted file mode 100644
index 1f30a87f526..00000000000
--- a/src/mongo/db/storage/rocks/rocks_init.cpp
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/rocks/rocks_engine.h"
-#include "mongo/db/storage/rocks/rocks_server_status.h"
-
-#include "mongo/base/init.h"
-#include "mongo/db/global_environment_experiment.h"
-#include "mongo/db/storage_options.h"
-#include "mongo/db/storage/kv/kv_storage_engine.h"
-#include "mongo/db/storage/storage_engine_metadata.h"
-#include "mongo/util/mongoutils/str.h"
-
-namespace mongo {
- const std::string kRocksDBEngineName = "RocksDB";
-
- namespace {
- class RocksFactory : public StorageEngine::Factory {
- public:
- virtual ~RocksFactory(){}
- virtual StorageEngine* create(const StorageGlobalParams& params,
- const StorageEngineLockFile& lockFile) const {
- KVStorageEngineOptions options;
- options.directoryPerDB = params.directoryperdb;
- options.forRepair = params.repair;
-                // Mongo keeps some files in params.dbpath. To avoid collision, put our
-                // files under the db/ directory.
- auto engine = new RocksEngine(params.dbpath + "/db", params.dur);
- // Intentionally leaked.
- auto leaked __attribute__((unused)) = new RocksServerStatusSection(engine);
-
- return new KVStorageEngine(engine, options);
- }
-
- virtual StringData getCanonicalName() const {
- return kRocksDBEngineName;
- }
-
- virtual Status validateCollectionStorageOptions(const BSONObj& options) const {
- return Status::OK();
- }
-
- virtual Status validateIndexStorageOptions(const BSONObj& options) const {
- return Status::OK();
- }
-
- virtual Status validateMetadata(const StorageEngineMetadata& metadata,
- const StorageGlobalParams& params) const {
- const BSONObj& options = metadata.getStorageEngineOptions();
- BSONElement element = options.getField(kRocksFormatVersionString);
- if (element.eoo() || !element.isNumber()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "Storage engine metadata format not recognized. If you created "
- "this database with older version of mongo, please reload the "
- "database using mongodump and mongorestore");
- }
- if (element.numberInt() != kRocksFormatVersion) {
- return Status(
- ErrorCodes::UnsupportedFormat,
- str::stream()
- << "Database created with format version " << element.numberInt()
- << " and this version only supports format version "
- << kRocksFormatVersion
- << ". Please reload the database using mongodump and mongorestore");
- }
- return Status::OK();
- }
-
- virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
- BSONObjBuilder builder;
- builder.append(kRocksFormatVersionString, kRocksFormatVersion);
- return builder.obj();
- }
-
- private:
- // Current disk format. We bump this number when we change the disk format. MongoDB will
- // fail to start if the versions don't match. In that case a user needs to run mongodump
- // and mongorestore.
- // * Version 1 was the format with many column families -- one column family for each
- // collection and index
- // * Version 2 (current) keeps all collections and indexes in a single column family
- const int kRocksFormatVersion = 2;
- const std::string kRocksFormatVersionString = "rocksFormatVersion";
- };
- } // namespace
-
- MONGO_INITIALIZER_WITH_PREREQUISITES(RocksEngineInit,
- ("SetGlobalEnvironment"))
- (InitializerContext* context) {
-
- getGlobalEnvironment()->registerStorageEngine(kRocksDBEngineName, new RocksFactory());
- return Status::OK();
- }
-
-}
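
With this module compiled in, the factory registered above made the engine selectable
by its canonical name at startup; an illustrative invocation would be:

    mongod --storageEngine RocksDB --dbpath /data/db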
diff --git a/src/mongo/db/storage/rocks/rocks_options_init.cpp b/src/mongo/db/storage/rocks/rocks_options_init.cpp
deleted file mode 100644
index 366d4b3e374..00000000000
--- a/src/mongo/db/storage/rocks/rocks_options_init.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/util/options_parser/startup_option_init.h"
-
-#include <iostream>
-
-#include "mongo/util/options_parser/startup_options.h"
-#include "mongo/db/storage/rocks/rocks_global_options.h"
-
-namespace mongo {
-
- MONGO_MODULE_STARTUP_OPTIONS_REGISTER(RocksOptions)(InitializerContext* context) {
- return rocksGlobalOptions.add(&moe::startupOptions);
- }
-
- MONGO_STARTUP_OPTIONS_VALIDATE(RocksOptions)(InitializerContext* context) {
- return Status::OK();
- }
-
- MONGO_STARTUP_OPTIONS_STORE(RocksOptions)(InitializerContext* context) {
- Status ret = rocksGlobalOptions.store(moe::startupOptionsParsed, context->args());
- if (!ret.isOK()) {
- std::cerr << ret.toString() << std::endl;
- std::cerr << "try '" << context->args()[0] << " --help' for more information"
- << std::endl;
- ::_exit(EXIT_BADOPTIONS);
- }
- return Status::OK();
- }
-}
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.cpp b/src/mongo/db/storage/rocks/rocks_record_store.cpp
deleted file mode 100644
index 3c11318f83c..00000000000
--- a/src/mongo/db/storage/rocks/rocks_record_store.cpp
+++ /dev/null
@@ -1,903 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/rocks/rocks_record_store.h"
-
-#include <boost/scoped_array.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/thread/locks.hpp>
-#include <memory>
-#include <algorithm>
-#include <utility>
-
-#include <rocksdb/comparator.h>
-#include <rocksdb/db.h>
-#include <rocksdb/options.h>
-#include <rocksdb/slice.h>
-#include <rocksdb/utilities/write_batch_with_index.h>
-
-#include "mongo/base/checked_cast.h"
-#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/namespace_string.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/rocks/rocks_engine.h"
-#include "mongo/db/storage/rocks/rocks_recovery_unit.h"
-#include "mongo/db/storage/oplog_hack.h"
-#include "mongo/platform/endian.h"
-#include "mongo/util/background.h"
-#include "mongo/util/log.h"
-#include "mongo/util/timer.h"
-
-namespace mongo {
-
- using boost::shared_ptr;
- using std::string;
-
- namespace {
-
- class CappedInsertChange : public RecoveryUnit::Change {
- public:
- CappedInsertChange(CappedVisibilityManager* cappedVisibilityManager,
- const RecordId& record)
- : _cappedVisibilityManager(cappedVisibilityManager), _record(record) {}
-
- virtual void commit() { _cappedVisibilityManager->dealtWithCappedRecord(_record); }
-
- virtual void rollback() { _cappedVisibilityManager->dealtWithCappedRecord(_record); }
-
- private:
- CappedVisibilityManager* _cappedVisibilityManager;
- RecordId _record;
- };
- } // namespace
-
- void CappedVisibilityManager::addUncommittedRecord(OperationContext* txn,
- const RecordId& record) {
- boost::lock_guard<boost::mutex> lk(_lock);
- _addUncommittedRecord_inlock(txn, record);
- }
-
- void CappedVisibilityManager::_addUncommittedRecord_inlock(OperationContext* txn,
- const RecordId& record) {
- // todo: make this a dassert at some point
- invariant(_uncommittedRecords.empty() || _uncommittedRecords.back() < record);
- _uncommittedRecords.push_back(record);
- txn->recoveryUnit()->registerChange(new CappedInsertChange(this, record));
- _oplog_highestSeen = record;
- }
-
- RecordId CappedVisibilityManager::getNextAndAddUncommittedRecord(
- OperationContext* txn, std::function<RecordId()> nextId) {
- boost::lock_guard<boost::mutex> lk(_lock);
- RecordId record = nextId();
- _addUncommittedRecord_inlock(txn, record);
- return record;
- }
-
- void CappedVisibilityManager::dealtWithCappedRecord(const RecordId& record) {
- boost::lock_guard<boost::mutex> lk(_lock);
- std::vector<RecordId>::iterator it =
- std::find(_uncommittedRecords.begin(), _uncommittedRecords.end(), record);
- invariant(it != _uncommittedRecords.end());
- _uncommittedRecords.erase(it);
- }
-
- bool CappedVisibilityManager::isCappedHidden(const RecordId& record) const {
- boost::lock_guard<boost::mutex> lk(_lock);
- if (_uncommittedRecords.empty()) {
- return false;
- }
- return _uncommittedRecords.front() <= record;
- }
-
- void CappedVisibilityManager::updateHighestSeen(const RecordId& record) {
- if (record > _oplog_highestSeen) {
- boost::lock_guard<boost::mutex> lk(_lock);
- if (record > _oplog_highestSeen) {
- _oplog_highestSeen = record;
- }
- }
- }
-
- RecordId CappedVisibilityManager::oplogStartHack() const {
- boost::lock_guard<boost::mutex> lk(_lock);
- if (_uncommittedRecords.empty()) {
- return _oplog_highestSeen;
- } else {
- return _uncommittedRecords.front();
- }
- }
-
- RocksRecordStore::RocksRecordStore(StringData ns, StringData id,
- rocksdb::DB* db, // not owned here
- std::string prefix, bool isCapped, int64_t cappedMaxSize,
- int64_t cappedMaxDocs,
- CappedDocumentDeleteCallback* cappedDeleteCallback)
- : RecordStore(ns),
- _db(db),
- _prefix(std::move(prefix)),
- _isCapped(isCapped),
- _cappedMaxSize(cappedMaxSize),
- _cappedMaxSizeSlack(std::min(cappedMaxSize / 10, int64_t(16 * 1024 * 1024))),
- _cappedMaxDocs(cappedMaxDocs),
- _cappedDeleteCallback(cappedDeleteCallback),
- _cappedDeleteCheckCount(0),
- _isOplog(NamespaceString::oplog(ns)),
- _oplogCounter(0),
- _cappedVisibilityManager((_isCapped || _isOplog) ? new CappedVisibilityManager()
- : nullptr),
- _ident(id.toString()),
- _dataSizeKey(std::string("\0\0\0\0", 4) + "datasize-" + id.toString()),
- _numRecordsKey(std::string("\0\0\0\0", 4) + "numrecords-" + id.toString()),
- _shuttingDown(false) {
-
- if (_isCapped) {
- invariant(_cappedMaxSize > 0);
- invariant(_cappedMaxDocs == -1 || _cappedMaxDocs > 0);
- }
- else {
- invariant(_cappedMaxSize == -1);
- invariant(_cappedMaxDocs == -1);
- }
-
- // Get next id
- boost::scoped_ptr<rocksdb::Iterator> iter(
- RocksRecoveryUnit::NewIteratorNoSnapshot(_db, _prefix));
- iter->SeekToLast();
- if (iter->Valid()) {
- rocksdb::Slice lastSlice = iter->key();
- RecordId lastId = _makeRecordId(lastSlice);
- if (_isOplog || _isCapped) {
- _cappedVisibilityManager->updateHighestSeen(lastId);
- }
- _nextIdNum.store(lastId.repr() + 1);
- } else {
- // Need to start at 1 so we are always higher than RecordId::min()
- _nextIdNum.store(1);
- }
-
- // load metadata
- _numRecords.store(RocksRecoveryUnit::getCounterValue(_db, _numRecordsKey));
- _dataSize.store(RocksRecoveryUnit::getCounterValue(_db, _dataSizeKey));
- invariant(_dataSize.load() >= 0);
- invariant(_numRecords.load() >= 0);
-
- _hasBackgroundThread = RocksEngine::initRsOplogBackgroundThread(ns);
- }
-
- RocksRecordStore::~RocksRecordStore() {
- {
- boost::lock_guard<boost::timed_mutex> lk(_cappedDeleterMutex);
- _shuttingDown = true;
- }
- }
-
- int64_t RocksRecordStore::storageSize(OperationContext* txn, BSONObjBuilder* extraInfo,
- int infoLevel) const {
-        // We're lying, but this is the best we can do for now.
-        // The value needs to be a multiple of 256 to keep
-        // jstests/concurrency/fsm_workloads/convert_to_capped_collection.js happy.
- return static_cast<int64_t>(
- std::max(_dataSize.load() & (~255), static_cast<long long>(256)));
- }
-
- RecordData RocksRecordStore::dataFor(OperationContext* txn, const RecordId& loc) const {
- RecordData rd = _getDataFor(_db, _prefix, txn, loc);
- massert(28605, "Didn't find RecordId in RocksRecordStore", (rd.data() != nullptr));
- return rd;
- }
-
- void RocksRecordStore::deleteRecord( OperationContext* txn, const RecordId& dl ) {
- std::string key(_makePrefixedKey(_prefix, dl));
-
- RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- if (!ru->transaction()->registerWrite(key)) {
- throw WriteConflictException();
- }
-
- std::string oldValue;
- ru->Get(key, &oldValue);
- int oldLength = oldValue.size();
-
- ru->writeBatch()->Delete(key);
-
- _changeNumRecords(txn, -1);
- _increaseDataSize(txn, -oldLength);
- }
-
- long long RocksRecordStore::numRecords(OperationContext* txn) const {
- RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit( txn );
- return _numRecords.load(std::memory_order::memory_order_relaxed) +
- ru->getDeltaCounter(_numRecordsKey);
- }
-
- bool RocksRecordStore::cappedAndNeedDelete(long long dataSizeDelta,
- long long numRecordsDelta) const {
- invariant(_isCapped);
-
- if (_dataSize.load() + dataSizeDelta > _cappedMaxSize)
- return true;
-
- if ((_cappedMaxDocs != -1) && (_numRecords.load() + numRecordsDelta > _cappedMaxDocs))
- return true;
-
- return false;
- }
-
- int64_t RocksRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
- const RecordId& justInserted) {
- if (!_isCapped) {
- return 0;
- }
-
- // We only want to do the checks occasionally as they are expensive.
- // This variable isn't thread safe, but has loose semantics anyway.
- dassert(!_isOplog || _cappedMaxDocs == -1);
-
- long long dataSizeDelta = 0, numRecordsDelta = 0;
- if (!_isOplog) {
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- dataSizeDelta = ru->getDeltaCounter(_dataSizeKey);
- numRecordsDelta = ru->getDeltaCounter(_numRecordsKey);
- }
-
- if (!cappedAndNeedDelete(dataSizeDelta, numRecordsDelta)) {
- return 0;
- }
-
- // ensure only one thread at a time can do deletes, otherwise they'll conflict.
- boost::unique_lock<boost::timed_mutex> lock(_cappedDeleterMutex, boost::defer_lock);
-
- if (_cappedMaxDocs != -1) {
- lock.lock(); // Max docs has to be exact, so have to check every time.
- }
-        else if (_hasBackgroundThread) {
-            // We are foreground, and there is a background thread.
-
- // Check if we need some back pressure.
- if ((_dataSize.load() - _cappedMaxSize) < _cappedMaxSizeSlack) {
- return 0;
- }
-
- // Back pressure needed!
-            // We're not actually going to delete anything, but we're going to synchronize
- // on the deleter thread.
- (void)lock.timed_lock(boost::posix_time::millisec(200));
- return 0;
- } else {
- if (!lock.try_lock()) {
- // Someone else is deleting old records. Apply back-pressure if too far behind,
- // otherwise continue.
- if ((_dataSize.load() - _cappedMaxSize) < _cappedMaxSizeSlack)
- return 0;
-
- if (!lock.timed_lock(boost::posix_time::millisec(200)))
- return 0;
-
- // If we already waited, let someone else do cleanup unless we are significantly
- // over the limit.
- if ((_dataSize.load() - _cappedMaxSize) < (2 * _cappedMaxSizeSlack))
- return 0;
- }
- }
-
- return cappedDeleteAsNeeded_inlock(txn, justInserted);
- }
-
- int64_t RocksRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn,
- const RecordId& justInserted) {
-        // we do this in a sub-transaction in case it aborts
- RocksRecoveryUnit* realRecoveryUnit =
- checked_cast<RocksRecoveryUnit*>(txn->releaseRecoveryUnit());
- invariant(realRecoveryUnit);
- txn->setRecoveryUnit(realRecoveryUnit->newRocksRecoveryUnit());
-
- int64_t dataSize = _dataSize.load() + realRecoveryUnit->getDeltaCounter(_dataSizeKey);
- int64_t numRecords = _numRecords.load() + realRecoveryUnit->getDeltaCounter(_numRecordsKey);
-
- int64_t sizeOverCap = (dataSize > _cappedMaxSize) ? dataSize - _cappedMaxSize : 0;
- int64_t sizeSaved = 0;
- int64_t docsOverCap = 0, docsRemoved = 0;
- if (_cappedMaxDocs != -1 && numRecords > _cappedMaxDocs) {
- docsOverCap = numRecords - _cappedMaxDocs;
- }
-
- try {
- WriteUnitOfWork wuow(txn);
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- boost::scoped_ptr<rocksdb::Iterator> iter(ru->NewIterator(_prefix));
- iter->SeekToFirst();
-
- RecordId newestOld;
- while ((sizeSaved < sizeOverCap || docsRemoved < docsOverCap) &&
- (docsRemoved < 20000) && iter->Valid()) {
-
- rocksdb::Slice slice = iter->key();
- newestOld = _makeRecordId(slice);
-
- // don't go past the record we just inserted
- if (newestOld >= justInserted) {
- break;
- }
-
- if (_shuttingDown) {
- break;
- }
-
- std::string key(_makePrefixedKey(_prefix, newestOld));
- if (!ru->transaction()->registerWrite(key)) {
- log() << "got conflict truncating capped, total docs removed " << docsRemoved;
- break;
- }
-
- auto oldValue = iter->value();
- ++docsRemoved;
- sizeSaved += oldValue.size();
-
- if (_cappedDeleteCallback) {
- uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(
- txn, newestOld,
- RecordData(static_cast<const char*>(oldValue.data()), oldValue.size())));
- }
-
- ru->writeBatch()->Delete(key);
- iter->Next();
- }
-
- if (docsRemoved > 0) {
- _changeNumRecords(txn, -docsRemoved);
- _increaseDataSize(txn, -sizeSaved);
- wuow.commit();
- }
- }
- catch ( const WriteConflictException& wce ) {
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit( realRecoveryUnit );
- log() << "got conflict truncating capped, ignoring";
- return 0;
- }
- catch ( ... ) {
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit( realRecoveryUnit );
- throw;
- }
-
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit( realRecoveryUnit );
- return docsRemoved;
-
- }
-
- StatusWith<RecordId> RocksRecordStore::insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota ) {
-
- if ( _isCapped && len > _cappedMaxSize ) {
- return StatusWith<RecordId>( ErrorCodes::BadValue,
- "object to insert exceeds cappedMaxSize" );
- }
-
- RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit( txn );
-
- RecordId loc;
- if (_isOplog) {
- StatusWith<RecordId> status = oploghack::extractKey(data, len);
- if (!status.isOK()) {
- return status;
- }
- loc = status.getValue();
- _cappedVisibilityManager->updateHighestSeen(loc);
- } else if (_isCapped) {
- loc = _cappedVisibilityManager->getNextAndAddUncommittedRecord(
- txn, [&]() { return _nextId(); });
- } else {
- loc = _nextId();
- }
-
- // No need to register the write here, since we just allocated a new RecordId so no other
- // transaction can access this key before we commit
- ru->writeBatch()->Put(_makePrefixedKey(_prefix, loc), rocksdb::Slice(data, len));
-
- _changeNumRecords( txn, 1 );
- _increaseDataSize( txn, len );
-
- cappedDeleteAsNeeded(txn, loc);
-
- return StatusWith<RecordId>( loc );
- }
-
- StatusWith<RecordId> RocksRecordStore::insertRecord( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota ) {
- const int len = doc->documentSize();
- boost::scoped_array<char> buf( new char[len] );
- doc->writeDocument( buf.get() );
-
- return insertRecord( txn, buf.get(), len, enforceQuota );
- }
-
- StatusWith<RecordId> RocksRecordStore::updateRecord( OperationContext* txn,
- const RecordId& loc,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier ) {
- std::string key(_makePrefixedKey(_prefix, loc));
-
- RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit( txn );
- if (!ru->transaction()->registerWrite(key)) {
- throw WriteConflictException();
- }
-
- std::string old_value;
- auto status = ru->Get(key, &old_value);
-
- if ( !status.ok() ) {
- return StatusWith<RecordId>( ErrorCodes::InternalError, status.ToString() );
- }
-
- int old_length = old_value.size();
-
- ru->writeBatch()->Put(key, rocksdb::Slice(data, len));
-
- _increaseDataSize(txn, len - old_length);
-
- cappedDeleteAsNeeded(txn, loc);
-
- return StatusWith<RecordId>( loc );
- }
-
- bool RocksRecordStore::updateWithDamagesSupported() const {
- return false;
- }
-
- Status RocksRecordStore::updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages ) {
- invariant(false);
- return Status::OK();
- }
-
- RecordIterator* RocksRecordStore::getIterator(OperationContext* txn, const RecordId& start,
- const CollectionScanParams::Direction& dir)
- const {
- if (_isOplog && dir == CollectionScanParams::FORWARD) {
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- if (!ru->hasSnapshot() || ru->getOplogReadTill().isNull()) {
- // we don't have snapshot, we can update our view
- ru->setOplogReadTill(_cappedVisibilityManager->oplogStartHack());
- }
- }
-
- return new Iterator(txn, _db, _prefix, _cappedVisibilityManager, dir, start);
- }
-
- std::vector<RecordIterator*> RocksRecordStore::getManyIterators(OperationContext* txn) const {
- return {new Iterator(txn, _db, _prefix, _cappedVisibilityManager,
- CollectionScanParams::FORWARD, RecordId())};
- }
-
- Status RocksRecordStore::truncate( OperationContext* txn ) {
- // XXX once we have readable WriteBatch, also delete outstanding writes to
- // this collection in the WriteBatch
- boost::scoped_ptr<RecordIterator> iter( getIterator( txn ) );
- while( !iter->isEOF() ) {
- RecordId loc = iter->getNext();
- deleteRecord( txn, loc );
- }
-
- return Status::OK();
- }
-
- Status RocksRecordStore::compact( OperationContext* txn,
- RecordStoreCompactAdaptor* adaptor,
- const CompactOptions* options,
- CompactStats* stats ) {
- std::string beginString(_makePrefixedKey(_prefix, RecordId()));
- std::string endString(_makePrefixedKey(_prefix, RecordId::max()));
- rocksdb::Slice beginRange(beginString);
- rocksdb::Slice endRange(endString);
- rocksdb::Status status = _db->CompactRange(&beginRange, &endRange);
- if ( status.ok() )
- return Status::OK();
- else
- return Status( ErrorCodes::InternalError, status.ToString() );
- }
-
- Status RocksRecordStore::validate( OperationContext* txn,
- bool full,
- bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results,
- BSONObjBuilder* output ) {
- // TODO validate that _numRecords and _dataSize are correct in scanData mode
- if ( scanData ) {
- bool invalidObject = false;
- size_t numRecords = 0;
- boost::scoped_ptr<RecordIterator> iter( getIterator( txn ) );
- while( !iter->isEOF() ) {
- numRecords++;
- if (full) {
- RecordData data = dataFor(txn, iter->curr());
- size_t dataSize;
- const Status status = adaptor->validate(data, &dataSize);
- if (!status.isOK()) {
- results->valid = false;
- if (invalidObject) {
- results->errors.push_back("invalid object detected (see logs)");
- }
- invalidObject = true;
- log() << "Invalid object detected in " << _ns << ": " << status.reason();
- }
- }
- iter->getNext();
- }
- output->appendNumber("nrecords", numRecords);
- }
- else
- output->appendNumber("nrecords", numRecords(txn));
-
- return Status::OK();
- }
-
- void RocksRecordStore::appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const {
- string statsString;
- result->appendBool("capped", _isCapped);
- if (_isCapped) {
- result->appendIntOrLL("max", _cappedMaxDocs);
- result->appendIntOrLL("maxSize", _cappedMaxSize / scale);
- }
- }
-
- Status RocksRecordStore::oplogDiskLocRegister(OperationContext* txn, const OpTime& opTime) {
- invariant(_isOplog);
- StatusWith<RecordId> record = oploghack::keyForOptime(opTime);
- if (record.isOK()) {
- _cappedVisibilityManager->addUncommittedRecord(txn, record.getValue());
- }
-
- return record.getStatus();
- }
-
- /**
- * Return the RecordId of an oplog entry as close to startingPosition as possible without
- * being higher. If there are no entries <= startingPosition, return RecordId().
- */
- boost::optional<RecordId> RocksRecordStore::oplogStartHack(
- OperationContext* txn, const RecordId& startingPosition) const {
-
- if (!_isOplog) {
- return boost::none;
- }
-
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- ru->setOplogReadTill(_cappedVisibilityManager->oplogStartHack());
-
- boost::scoped_ptr<rocksdb::Iterator> iter(ru->NewIterator(_prefix));
- int64_t storage;
- iter->Seek(_makeKey(startingPosition, &storage));
- if (!iter->Valid()) {
- iter->SeekToLast();
- if (iter->Valid()) {
- // startingPosition is bigger than everything else
- return _makeRecordId(iter->key());
- } else {
- // record store is empty
- return RecordId();
- }
- }
-
- // We're at or past target:
- // 1) if we're at -- return
- // 2) if we're past -- do a prev()
- RecordId foundKey = _makeRecordId(iter->key());
- int cmp = startingPosition.compare(foundKey);
- if (cmp != 0) {
- // RocksDB invariant -- iterator needs to land at or past target when Seek-ing
- invariant(cmp < 0);
- // we're past target -- prev()
- iter->Prev();
- }
-
- if (!iter->Valid()) {
- // there are no entries <= startingPosition
- return RecordId();
- }
-
- return _makeRecordId(iter->key());
- }
-
- void RocksRecordStore::temp_cappedTruncateAfter( OperationContext* txn,
- RecordId end,
- bool inclusive ) {
- // copied from WiredTigerRecordStore::temp_cappedTruncateAfter()
- WriteUnitOfWork wuow(txn);
- boost::scoped_ptr<RecordIterator> iter( getIterator( txn, end ) );
- while( !iter->isEOF() ) {
- RecordId loc = iter->getNext();
- if ( end < loc || ( inclusive && end == loc ) ) {
- deleteRecord( txn, loc );
- }
- }
- wuow.commit();
- }
-
- rocksdb::ReadOptions RocksRecordStore::_readOptions(OperationContext* opCtx) {
- rocksdb::ReadOptions options;
- if ( opCtx ) {
- options.snapshot = RocksRecoveryUnit::getRocksRecoveryUnit( opCtx )->snapshot();
- }
- return options;
- }
-
- RecordId RocksRecordStore::_nextId() {
- invariant(!_isOplog);
- return RecordId(_nextIdNum.fetchAndAdd(1));
- }
-
- rocksdb::Slice RocksRecordStore::_makeKey(const RecordId& loc, int64_t* storage) {
- *storage = endian::nativeToBig(loc.repr());
- return rocksdb::Slice(reinterpret_cast<const char*>(storage), sizeof(*storage));
- }
-
- std::string RocksRecordStore::_makePrefixedKey(const std::string& prefix, const RecordId& loc) {
- int64_t storage;
- auto encodedLoc = _makeKey(loc, &storage);
- std::string key(prefix);
- key.append(encodedLoc.data(), encodedLoc.size());
- return key;
- }
-
- RecordId RocksRecordStore::_makeRecordId(const rocksdb::Slice& slice) {
- invariant(slice.size() == sizeof(int64_t));
- int64_t repr = endian::bigToNative(*reinterpret_cast<const int64_t*>(slice.data()));
-        return RecordId(repr);
- }
-
- bool RocksRecordStore::findRecord( OperationContext* txn,
- const RecordId& loc, RecordData* out ) const {
- RecordData rd = _getDataFor(_db, _prefix, txn, loc);
- if ( rd.data() == NULL )
- return false;
- *out = rd;
- return true;
- }
-
- RecordData RocksRecordStore::_getDataFor(rocksdb::DB* db, const std::string& prefix,
- OperationContext* txn, const RecordId& loc) {
- RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
-
- std::string valueStorage;
- auto status = ru->Get(_makePrefixedKey(prefix, loc), &valueStorage);
- if (!status.ok()) {
- if (status.IsNotFound()) {
- return RecordData(nullptr, 0);
- } else {
- log() << "rocks Get failed, blowing up: " << status.ToString();
- invariant(false);
- }
- }
-
- SharedBuffer data = SharedBuffer::allocate(valueStorage.size());
- memcpy(data.get(), valueStorage.data(), valueStorage.size());
- return RecordData(data, valueStorage.size());
- }
-
- void RocksRecordStore::_changeNumRecords(OperationContext* txn, int64_t amount) {
- RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- ru->incrementCounter(_numRecordsKey, &_numRecords, amount);
- }
-
- void RocksRecordStore::_increaseDataSize(OperationContext* txn, int64_t amount) {
- RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit( txn );
- ru->incrementCounter(_dataSizeKey, &_dataSize, amount);
- }
-
- // --------
-
- RocksRecordStore::Iterator::Iterator(
- OperationContext* txn, rocksdb::DB* db, std::string prefix,
- boost::shared_ptr<CappedVisibilityManager> cappedVisibilityManager,
- const CollectionScanParams::Direction& dir, const RecordId& start)
- : _txn(txn),
- _db(db),
- _prefix(std::move(prefix)),
- _cappedVisibilityManager(cappedVisibilityManager),
- _dir(dir),
- _eof(true),
- _readUntilForOplog(RocksRecoveryUnit::getRocksRecoveryUnit(txn)->getOplogReadTill()),
- _iterator(RocksRecoveryUnit::getRocksRecoveryUnit(txn)->NewIterator(_prefix)) {
-
- _locate(start);
- }
-
- void RocksRecordStore::Iterator::_checkStatus() {
- if ( !_iterator->status().ok() )
- log() << "Rocks Iterator Error: " << _iterator->status().ToString();
- invariant( _iterator->status().ok() );
- }
-
- bool RocksRecordStore::Iterator::isEOF() {
- return _eof;
- }
-
- RecordId RocksRecordStore::Iterator::curr() {
- if (_eof) {
- return RecordId();
- }
-
- return _curr;
- }
-
- RecordId RocksRecordStore::Iterator::getNext() {
- if (_eof) {
- return RecordId();
- }
-
- RecordId toReturn = _curr;
-
- if ( _forward() )
- _iterator->Next();
- else
- _iterator->Prev();
-
- if (_iterator->Valid()) {
- _curr = _decodeCurr();
- if (_cappedVisibilityManager.get()) { // isCapped?
- if (_readUntilForOplog.isNull()) {
- // this is the normal capped case
- if (_cappedVisibilityManager->isCappedHidden(_curr)) {
- _eof = true;
- }
- } else {
- // this is for oplogs
- if (_curr > _readUntilForOplog ||
- (_curr == _readUntilForOplog &&
- _cappedVisibilityManager->isCappedHidden(_curr))) {
- _eof = true;
- }
- }
- } // isCapped?
- } else {
- _eof = true;
- // we leave _curr as it is on purpose
- }
-
- _checkStatus();
- _lastLoc = toReturn;
- return toReturn;
- }
-
- void RocksRecordStore::Iterator::invalidate( const RecordId& dl ) {
- // this should never be called
- }
-
- void RocksRecordStore::Iterator::saveState() {
- _iterator.reset();
- _txn = nullptr;
- }
-
- bool RocksRecordStore::Iterator::restoreState(OperationContext* txn) {
- _txn = txn;
- if (_eof) {
- return true;
- }
-
- auto ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
- _iterator.reset(ru->NewIterator(_prefix));
-
- RecordId saved = _lastLoc;
- _locate(_lastLoc);
-
- if (_eof) {
- _lastLoc = RecordId();
- } else if (_curr != saved) {
- // _cappedVisibilityManager is not-null when isCapped == true
- if (_cappedVisibilityManager.get() && saved != RecordId()) {
- // Doc was deleted either by cappedDeleteAsNeeded() or cappedTruncateAfter().
- // It is important that we error out in this case so that consumers don't
- // silently get 'holes' when scanning capped collections. We don't make
- // this guarantee for normal collections so it is ok to skip ahead in that case.
- _eof = true;
- return false;
- }
- // lastLoc was either deleted or never set (yielded before first call to getNext()),
- // so bump ahead to the next record.
- } else {
-            // we found where we left off! advance to the next record
- getNext();
- _lastLoc = saved;
- }
-
- return true;
- }
-
- RecordData RocksRecordStore::Iterator::dataFor(const RecordId& loc) const {
- if (!_eof && loc == _curr && _iterator->Valid() && _iterator->status().ok()) {
- SharedBuffer data = SharedBuffer::allocate(_iterator->value().size());
- memcpy(data.get(), _iterator->value().data(), _iterator->value().size());
- return RecordData(std::move(data), _iterator->value().size());
- }
- return RocksRecordStore::_getDataFor(_db, _prefix, _txn, loc);
- }
-
- void RocksRecordStore::Iterator::_locate(const RecordId& loc) {
- if (_forward()) {
- if (loc.isNull()) {
- _iterator->SeekToFirst();
- } else {
- int64_t locStorage;
- _iterator->Seek(RocksRecordStore::_makeKey(loc, &locStorage));
- }
- _checkStatus();
- } else { // backward iterator
- if (loc.isNull()) {
- _iterator->SeekToLast();
- } else {
- // lower bound on reverse iterator
- int64_t locStorage;
- _iterator->Seek(RocksRecordStore::_makeKey(loc, &locStorage));
- _checkStatus();
- if (!_iterator->Valid()) {
- _iterator->SeekToLast();
- } else if (_decodeCurr() != loc) {
- _iterator->Prev();
- }
- }
- _checkStatus();
- }
- _eof = !_iterator->Valid();
- if (_eof) {
- _curr = loc;
- } else {
- _curr = _decodeCurr();
- }
- }
-
- RecordId RocksRecordStore::Iterator::_decodeCurr() const {
- invariant(_iterator && _iterator->Valid());
- return _makeRecordId(_iterator->key());
- }
-
- bool RocksRecordStore::Iterator::_forward() const {
- return _dir == CollectionScanParams::FORWARD;
- }
-
-}
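
The subtlest logic in this file is the backpressure policy in cappedDeleteAsNeeded():
a foreground writer skips cleanup while the collection is within one "slack" of its
cap, and only blocks on (or takes over from) the deleter once it drifts further. A
condensed sketch of that decision, with simplified, illustrative names:

    #include <cstdint>

    // Should a foreground writer wait for the capped deleter? The slack is
    // min(cappedMaxSize / 10, 16MB), as computed in the constructor above.
    bool needsBackPressure(int64_t dataSize, int64_t cappedMaxSize, int64_t slack) {
        // Within one slack of the cap: let the background thread catch up.
        // At or beyond it: block briefly (200ms) on the deleter mutex.
        return (dataSize - cappedMaxSize) >= slack;
    }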
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.h b/src/mongo/db/storage/rocks/rocks_record_store.h
deleted file mode 100644
index 7cacc8cb285..00000000000
--- a/src/mongo/db/storage/rocks/rocks_record_store.h
+++ /dev/null
@@ -1,273 +0,0 @@
-/**
-* Copyright (C) 2014 MongoDB Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects for
-* all of the code used other than as permitted herein. If you modify file(s)
-* with this exception, you may extend this exception to your version of the
-* file(s), but you are not obligated to do so. If you do not wish to do so,
-* delete this exception statement from your version. If you delete this
-* exception statement from all source files in the program, then also delete
-* it in the license file.
-*/
-
-#pragma once
-
-#include <atomic>
-#include <boost/scoped_ptr.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/thread/mutex.hpp>
-#include <string>
-#include <memory>
-#include <vector>
-#include <functional>
-
-#include <rocksdb/options.h>
-
-#include "mongo/db/storage/capped_callback.h"
-#include "mongo/db/storage/record_store.h"
-#include "mongo/platform/atomic_word.h"
-
-namespace rocksdb {
- class DB;
- class Iterator;
- class Slice;
-}
-
-namespace mongo {
-
- class CappedVisibilityManager {
- public:
- CappedVisibilityManager() : _oplog_highestSeen(RecordId::min()) {}
- void dealtWithCappedRecord(const RecordId& record);
- void updateHighestSeen(const RecordId& record);
- void addUncommittedRecord(OperationContext* txn, const RecordId& record);
-
-        // A bit hacky, but does the job: allocates the next RecordId and registers it
-        // as uncommitted while holding the lock just once.
- RecordId getNextAndAddUncommittedRecord(OperationContext* txn,
- std::function<RecordId()> nextId);
-
- bool isCappedHidden(const RecordId& record) const;
- RecordId oplogStartHack() const;
-
- private:
- void _addUncommittedRecord_inlock(OperationContext* txn, const RecordId& record);
-
- // protects the state
- mutable boost::mutex _lock;
- std::vector<RecordId> _uncommittedRecords;
- RecordId _oplog_highestSeen;
- };
-
- class RocksRecoveryUnit;
-
- class RocksRecordStore : public RecordStore {
- public:
- RocksRecordStore(StringData ns, StringData id, rocksdb::DB* db, std::string prefix,
- bool isCapped = false, int64_t cappedMaxSize = -1,
- int64_t cappedMaxDocs = -1,
- CappedDocumentDeleteCallback* cappedDeleteCallback = NULL);
-
- virtual ~RocksRecordStore();
-
- // name of the RecordStore implementation
- virtual const char* name() const { return "rocks"; }
-
- virtual long long dataSize(OperationContext* txn) const { return _dataSize.load(); }
-
- virtual long long numRecords( OperationContext* txn ) const;
-
- virtual bool isCapped() const { return _isCapped; }
-
- virtual int64_t storageSize( OperationContext* txn,
- BSONObjBuilder* extraInfo = NULL,
- int infoLevel = 0 ) const;
-
- // CRUD related
-
- virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
-
- virtual bool findRecord( OperationContext* txn,
- const RecordId& loc,
- RecordData* out ) const;
-
- virtual void deleteRecord( OperationContext* txn, const RecordId& dl );
-
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota );
-
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota );
-
- virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
- const RecordId& oldLocation,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier );
-
- virtual bool updateWithDamagesSupported() const;
-
- virtual Status updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages );
-
- virtual RecordIterator* getIterator( OperationContext* txn,
- const RecordId& start = RecordId(),
- const CollectionScanParams::Direction& dir =
- CollectionScanParams::FORWARD ) const;
-
- virtual std::vector<RecordIterator*> getManyIterators( OperationContext* txn ) const;
-
- virtual Status truncate( OperationContext* txn );
-
- virtual bool compactSupported() const { return true; }
- virtual bool compactsInPlace() const { return true; }
-
- virtual Status compact( OperationContext* txn,
- RecordStoreCompactAdaptor* adaptor,
- const CompactOptions* options,
- CompactStats* stats );
-
- virtual Status validate( OperationContext* txn,
- bool full, bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results, BSONObjBuilder* output );
-
- virtual void appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const;
-
- virtual void temp_cappedTruncateAfter(OperationContext* txn,
- RecordId end,
- bool inclusive);
-
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
- const RecordId& startingPosition) const;
-
- virtual Status oplogDiskLocRegister(OperationContext* txn, const OpTime& opTime);
-
- virtual void updateStatsAfterRepair(OperationContext* txn,
- long long numRecords,
- long long dataSize) {
- // TODO
- }
-
- void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) {
- _cappedDeleteCallback = cb;
- }
-        int64_t cappedMaxDocs() const { invariant(_isCapped); return _cappedMaxDocs; }
-        int64_t cappedMaxSize() const { invariant(_isCapped); return _cappedMaxSize; }
- bool isOplog() const { return _isOplog; }
-
- int64_t cappedDeleteAsNeeded(OperationContext* txn, const RecordId& justInserted);
- int64_t cappedDeleteAsNeeded_inlock(OperationContext* txn, const RecordId& justInserted);
- boost::timed_mutex& cappedDeleterMutex() { return _cappedDeleterMutex; }
-
- static rocksdb::Comparator* newRocksCollectionComparator();
-
- private:
- // NOTE: RecordIterator might outlive the RecordStore. That's why we use all those
- // shared_ptrs
- class Iterator : public RecordIterator {
- public:
- Iterator(OperationContext* txn, rocksdb::DB* db, std::string prefix,
- boost::shared_ptr<CappedVisibilityManager> cappedVisibilityManager,
- const CollectionScanParams::Direction& dir, const RecordId& start);
-
- virtual bool isEOF();
- virtual RecordId curr();
- virtual RecordId getNext();
- virtual void invalidate(const RecordId& dl);
- virtual void saveState();
- virtual bool restoreState(OperationContext* txn);
- virtual RecordData dataFor( const RecordId& loc ) const;
-
- private:
- void _locate(const RecordId& loc);
- RecordId _decodeCurr() const;
- bool _forward() const;
- void _checkStatus();
-
- OperationContext* _txn;
- rocksdb::DB* _db; // not owned
- std::string _prefix;
- boost::shared_ptr<CappedVisibilityManager> _cappedVisibilityManager;
- CollectionScanParams::Direction _dir;
- bool _eof;
- const RecordId _readUntilForOplog;
- RecordId _curr;
- RecordId _lastLoc;
- boost::scoped_ptr<rocksdb::Iterator> _iterator;
- };
-
- /**
- * Returns a new ReadOptions struct, containing the snapshot held in opCtx, if opCtx is not
- * null
- */
- static rocksdb::ReadOptions _readOptions(OperationContext* opCtx = NULL);
-
- static RecordId _makeRecordId( const rocksdb::Slice& slice );
-
- static RecordData _getDataFor(rocksdb::DB* db, const std::string& prefix,
- OperationContext* txn, const RecordId& loc);
-
- RecordId _nextId();
- bool cappedAndNeedDelete(long long dataSizeDelta, long long numRecordsDelta) const;
-
- // The use of this function requires that the passed in storage outlives the returned Slice
- static rocksdb::Slice _makeKey(const RecordId& loc, int64_t* storage);
- static std::string _makePrefixedKey(const std::string& prefix, const RecordId& loc);
-
- void _changeNumRecords(OperationContext* txn, int64_t amount);
- void _increaseDataSize(OperationContext* txn, int64_t amount);
-
- rocksdb::DB* _db; // not owned
- std::string _prefix;
-
- const bool _isCapped;
- const int64_t _cappedMaxSize;
- const int64_t _cappedMaxSizeSlack; // when to start applying backpressure
- const int64_t _cappedMaxDocs;
- CappedDocumentDeleteCallback* _cappedDeleteCallback;
- mutable boost::timed_mutex _cappedDeleterMutex; // see comment in ::cappedDeleteAsNeeded
- int _cappedDeleteCheckCount; // see comment in ::cappedDeleteAsNeeded
-
- const bool _isOplog;
- int _oplogCounter;
-
- boost::shared_ptr<CappedVisibilityManager> _cappedVisibilityManager;
-
- std::string _ident;
- AtomicUInt64 _nextIdNum;
- std::atomic<long long> _dataSize;
- std::atomic<long long> _numRecords;
-
- const std::string _dataSizeKey;
- const std::string _numRecordsKey;
-
- bool _shuttingDown;
- bool _hasBackgroundThread;
- };
-}
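
A design note on the _makeKey/_makeRecordId pair declared above: RecordIds are encoded
big-endian, so RocksDB's bytewise (memcmp) key ordering coincides with numeric RecordId
ordering for the non-negative ids used here, and forward scans return records in
insertion order. A self-contained sketch of the encoding (the real code uses
mongo/platform/endian.h):

    #include <cstdint>

    // Emit the int64 repr of a RecordId most-significant byte first, so that
    // byte-wise comparison of the output matches numeric comparison of the
    // (non-negative) input.
    void encodeBigEndian(int64_t repr, unsigned char out[8]) {
        for (int i = 7; i >= 0; --i) {
            out[i] = static_cast<unsigned char>(repr & 0xff);
            repr >>= 8;
        }
    }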
diff --git a/src/mongo/db/storage/rocks/rocks_record_store_mock.cpp b/src/mongo/db/storage/rocks/rocks_record_store_mock.cpp
deleted file mode 100644
index d60bf6cb738..00000000000
--- a/src/mongo/db/storage/rocks/rocks_record_store_mock.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/rocks/rocks_engine.h"
-
-namespace mongo {
-
- // static
- bool RocksEngine::initRsOplogBackgroundThread(StringData ns) {
- return false;
- }
-
-} // namespace mongo
diff --git a/src/mongo/db/storage/rocks/rocks_record_store_mongod.cpp b/src/mongo/db/storage/rocks/rocks_record_store_mongod.cpp
deleted file mode 100644
index 0fc8e64e037..00000000000
--- a/src/mongo/db/storage/rocks/rocks_record_store_mongod.cpp
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
-
-#include "mongo/platform/basic.h"
-
-#include <boost/thread/mutex.hpp>
-#include <set>
-
-#include "mongo/base/checked_cast.h"
-#include "mongo/db/client.h"
-#include "mongo/db/catalog/collection.h"
-#include "mongo/db/catalog/database.h"
-#include "mongo/db/concurrency/d_concurrency.h"
-#include "mongo/db/global_environment_experiment.h"
-#include "mongo/db/namespace_string.h"
-#include "mongo/db/operation_context_impl.h"
-#include "mongo/db/storage/rocks/rocks_engine.h"
-#include "mongo/db/storage/rocks/rocks_record_store.h"
-#include "mongo/db/storage/rocks/rocks_recovery_unit.h"
-#include "mongo/util/background.h"
-#include "mongo/util/exit.h"
-#include "mongo/util/log.h"
-
-namespace mongo {
-
- namespace {
-
- std::set<NamespaceString> _backgroundThreadNamespaces;
- boost::mutex _backgroundThreadMutex;
-
- class RocksRecordStoreThread : public BackgroundJob {
- public:
- RocksRecordStoreThread(const NamespaceString& ns)
- : _ns(ns) {
- _name = std::string("RocksRecordStoreThread for ") + _ns.toString();
- }
-
- virtual std::string name() const {
- return _name;
- }
-
- /**
- * @return Number of documents deleted.
- */
- int64_t _deleteExcessDocuments() {
- if (!getGlobalEnvironment()->getGlobalStorageEngine()) {
- LOG(1) << "no global storage engine yet";
- return 0;
- }
-
- OperationContextImpl txn;
-
- try {
- ScopedTransaction transaction(&txn, MODE_IX);
-
- AutoGetDb autoDb(&txn, _ns.db(), MODE_IX);
- Database* db = autoDb.getDb();
- if (!db) {
- LOG(2) << "no local database yet";
- return 0;
- }
-
- Lock::CollectionLock collectionLock(txn.lockState(), _ns.ns(), MODE_IX);
- Collection* collection = db->getCollection(_ns);
- if (!collection) {
- LOG(2) << "no collection " << _ns;
- return 0;
- }
-
- Client::Context ctx(&txn, _ns, false);
- RocksRecordStore* rs =
- checked_cast<RocksRecordStore*>(collection->getRecordStore());
- WriteUnitOfWork wuow(&txn);
- boost::lock_guard<boost::timed_mutex> lock(rs->cappedDeleterMutex());
- int64_t removed = rs->cappedDeleteAsNeeded_inlock(&txn, RecordId::max());
- wuow.commit();
- return removed;
- }
- catch (const std::exception& e) {
- severe() << "error in RocksRecordStoreThread: " << e.what();
- fassertFailedNoTrace(!"error in RocksRecordStoreThread");
- }
- catch (...) {
- fassertFailedNoTrace(!"unknown error in RocksRecordStoreThread");
- }
- }
-
- virtual void run() {
- Client::initThread(_name.c_str());
-
- while (!inShutdown()) {
- int64_t removed = _deleteExcessDocuments();
- LOG(2) << "RocksRecordStoreThread deleted " << removed;
- if (removed == 0) {
-                    // If we removed 0 documents, there is nothing to trim;
-                    // sleep for a second to avoid busy-waiting.
- sleepmillis(1000);
- }
-                    else if (removed < 1000) {
-                        // 1000 is the batch size; a partial batch means we are
-                        // nearly caught up, so only sleep briefly.
- sleepmillis(10);
- }
- }
-
- cc().shutdown();
-
- log() << "shutting down";
- }
-
- private:
- NamespaceString _ns;
- std::string _name;
- };
-
- } // namespace
-
- // static
- bool RocksEngine::initRsOplogBackgroundThread(StringData ns) {
- if (!NamespaceString::oplog(ns)) {
- return false;
- }
-
- if (storageGlobalParams.repair) {
- LOG(1) << "not starting RocksRecordStoreThread for " << ns
- << " because we are in repair";
- return false;
- }
-
- boost::lock_guard<boost::mutex> lock(_backgroundThreadMutex);
- NamespaceString nss(ns);
- if (_backgroundThreadNamespaces.count(nss)) {
- log() << "RocksRecordStoreThread " << ns << " already started";
- }
- else {
- log() << "Starting RocksRecordStoreThread " << ns;
- BackgroundJob* backgroundThread = new RocksRecordStoreThread(nss);
- backgroundThread->go();
- _backgroundThreadNamespaces.insert(nss);
- }
- return true;
- }
-
-} // namespace mongo
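
The deleter loop above adapts its sleep time to how much work the last pass found: a full 1000-document batch loops again immediately, a partial batch naps for 10ms, and an empty pass backs off for a full second. A minimal standalone sketch of that backoff policy follows; runCappedDeleterLoop, deleteBatch and shuttingDown are hypothetical stand-ins for the thread's run(), cappedDeleteAsNeeded_inlock() and inShutdown().

    #include <chrono>
    #include <cstdint>
    #include <functional>
    #include <thread>

    void runCappedDeleterLoop(const std::function<std::int64_t()>& deleteBatch,
                              const std::function<bool()>& shuttingDown) {
        while (!shuttingDown()) {
            const std::int64_t removed = deleteBatch();
            if (removed == 0) {
                // Nothing to trim: back off a full second.
                std::this_thread::sleep_for(std::chrono::seconds(1));
            } else if (removed < 1000) {
                // Partial batch: we are nearly caught up, nap briefly.
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
            }
            // Full batch: there is likely more work queued, loop immediately.
        }
    }
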
diff --git a/src/mongo/db/storage/rocks/rocks_record_store_test.cpp b/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
deleted file mode 100644
index b6ebcc7dfc4..00000000000
--- a/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
+++ /dev/null
@@ -1,461 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include <boost/filesystem/operations.hpp>
-#include <boost/scoped_ptr.hpp>
-#include <boost/shared_ptr.hpp>
-#include <memory>
-#include <vector>
-
-#include <rocksdb/comparator.h>
-#include <rocksdb/db.h>
-#include <rocksdb/options.h>
-#include <rocksdb/slice.h>
-
-#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/storage/record_store_test_harness.h"
-#include "mongo/db/storage/rocks/rocks_record_store.h"
-#include "mongo/db/storage/rocks/rocks_recovery_unit.h"
-#include "mongo/db/storage/rocks/rocks_transaction.h"
-#include "mongo/unittest/unittest.h"
-#include "mongo/unittest/temp_dir.h"
-
-namespace mongo {
-
- using boost::scoped_ptr;
- using boost::shared_ptr;
- using std::string;
-
- class RocksRecordStoreHarnessHelper : public HarnessHelper {
- public:
- RocksRecordStoreHarnessHelper() : _tempDir(_testNamespace) {
- boost::filesystem::remove_all(_tempDir.path());
- rocksdb::DB* db;
- rocksdb::Options options;
- options.create_if_missing = true;
- auto s = rocksdb::DB::Open(options, _tempDir.path(), &db);
- ASSERT(s.ok());
- _db.reset(db);
- }
-
- virtual RecordStore* newNonCappedRecordStore() {
- return newNonCappedRecordStore("foo.bar");
- }
- RecordStore* newNonCappedRecordStore(const std::string& ns) {
- return new RocksRecordStore(ns, "1", _db.get(), "prefix");
- }
-
- RecordStore* newCappedRecordStore(const std::string& ns, int64_t cappedMaxSize,
- int64_t cappedMaxDocs) {
- return new RocksRecordStore(ns, "1", _db.get(), "prefix", true, cappedMaxSize,
- cappedMaxDocs);
- }
-
- virtual RecoveryUnit* newRecoveryUnit() {
- return new RocksRecoveryUnit(&_transactionEngine, _db.get(), true);
- }
-
- private:
- string _testNamespace = "mongo-rocks-record-store-test";
- unittest::TempDir _tempDir;
- boost::scoped_ptr<rocksdb::DB> _db;
- RocksTransactionEngine _transactionEngine;
- };
-
- HarnessHelper* newHarnessHelper() { return new RocksRecordStoreHarnessHelper(); }
-
-    TEST(RocksRecordStoreTest, Isolation1) {
- scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- RecordId loc1;
- RecordId loc2;
-
- {
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
-
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc1 = res.getValue();
-
- res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc2 = res.getValue();
-
- uow.commit();
- }
- }
-
- {
- scoped_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
- scoped_ptr<OperationContext> t2( harnessHelper->newOperationContext() );
-
- scoped_ptr<WriteUnitOfWork> w1( new WriteUnitOfWork( t1.get() ) );
- scoped_ptr<WriteUnitOfWork> w2( new WriteUnitOfWork( t2.get() ) );
-
- rs->dataFor( t1.get(), loc1 );
- rs->dataFor( t2.get(), loc1 );
-
- ASSERT_OK( rs->updateRecord( t1.get(), loc1, "b", 2, false, NULL ).getStatus() );
- ASSERT_OK( rs->updateRecord( t1.get(), loc2, "B", 2, false, NULL ).getStatus() );
-
- // this should throw
- ASSERT_THROWS(rs->updateRecord(t2.get(), loc1, "c", 2, false, NULL),
- WriteConflictException);
-
- w1->commit(); // this should succeed
- }
- }
-
-    TEST(RocksRecordStoreTest, Isolation2) {
- scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- RecordId loc1;
- RecordId loc2;
-
- {
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
-
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc1 = res.getValue();
-
- res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc2 = res.getValue();
-
- uow.commit();
- }
- }
-
- {
- scoped_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
- scoped_ptr<OperationContext> t2( harnessHelper->newOperationContext() );
-
- // ensure we start transactions
- rs->dataFor( t1.get(), loc2 );
- rs->dataFor( t2.get(), loc2 );
-
- {
- WriteUnitOfWork w( t1.get() );
- ASSERT_OK( rs->updateRecord( t1.get(), loc1, "b", 2, false, NULL ).getStatus() );
- w.commit();
- }
-
- {
- WriteUnitOfWork w( t2.get() );
- ASSERT_EQUALS(string("a"), rs->dataFor(t2.get(), loc1).data());
- // this should fail as our version of loc1 is too old
- ASSERT_THROWS(rs->updateRecord(t2.get(), loc1, "c", 2, false, NULL),
- WriteConflictException);
- }
- }
- }
-
- StatusWith<RecordId> insertBSON(scoped_ptr<OperationContext>& opCtx,
- scoped_ptr<RecordStore>& rs,
- const OpTime& opTime) {
- BSONObj obj = BSON( "ts" << opTime );
- WriteUnitOfWork wuow(opCtx.get());
- RocksRecordStore* rrs = dynamic_cast<RocksRecordStore*>(rs.get());
- invariant( rrs );
- Status status = rrs->oplogDiskLocRegister( opCtx.get(), opTime );
- if (!status.isOK())
- return StatusWith<RecordId>( status );
- StatusWith<RecordId> res = rs->insertRecord(opCtx.get(),
- obj.objdata(),
- obj.objsize(),
- false);
- if (res.isOK())
- wuow.commit();
- return res;
- }
-
- // TODO remove from here once mongo made the test generic
- TEST(RocksRecordStoreTest, OplogHack) {
- RocksRecordStoreHarnessHelper harnessHelper;
- scoped_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("local.oplog.foo"));
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
-
- // always illegal
- ASSERT_EQ(insertBSON(opCtx, rs, OpTime(2,-1)).getStatus(),
- ErrorCodes::BadValue);
-
- {
- BSONObj obj = BSON("not_ts" << OpTime(2,1));
- ASSERT_EQ(rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(),
- false ).getStatus(),
- ErrorCodes::BadValue);
-
- obj = BSON( "ts" << "not an OpTime" );
- ASSERT_EQ(rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(),
- false ).getStatus(),
- ErrorCodes::BadValue);
- }
-
- // currently dasserts
- // ASSERT_EQ(insertBSON(opCtx, rs, BSON("ts" << OpTime(-2,1))).getStatus(),
- // ErrorCodes::BadValue);
-
- // success cases
- ASSERT_EQ(insertBSON(opCtx, rs, OpTime(1,1)).getValue(),
- RecordId(1,1));
-
- ASSERT_EQ(insertBSON(opCtx, rs, OpTime(1,2)).getValue(),
- RecordId(1,2));
-
- ASSERT_EQ(insertBSON(opCtx, rs, OpTime(2,2)).getValue(),
- RecordId(2,2));
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- // find start
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0,1)), RecordId()); // nothing <=
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,1)), RecordId(1,2)); // between
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,2)), RecordId(2,2)); // ==
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(2,2)); // > highest
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(2,2), false); // no-op
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(2,2));
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1,2), false); // deletes 2,2
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(1,2));
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1,2), true); // deletes 1,2
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(1,1));
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- WriteUnitOfWork wuow(opCtx.get());
- ASSERT_OK(rs->truncate(opCtx.get())); // deletes 1,1 and leaves collection empty
- wuow.commit();
- }
-
- {
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId());
- }
- }
-
- TEST(RocksRecordStoreTest, OplogHackOnNonOplog) {
- RocksRecordStoreHarnessHelper harnessHelper;
- scoped_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("local.NOT_oplog.foo"));
-
- scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
-
- BSONObj obj = BSON( "ts" << OpTime(2,-1) );
- {
- WriteUnitOfWork wuow( opCtx.get() );
- ASSERT_OK(rs->insertRecord(opCtx.get(), obj.objdata(),
- obj.objsize(), false ).getStatus());
- wuow.commit();
- }
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0,1)), boost::none);
- }
-
- TEST(RocksRecordStoreTest, CappedOrder) {
- scoped_ptr<RocksRecordStoreHarnessHelper> harnessHelper(new RocksRecordStoreHarnessHelper());
-        scoped_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 100000, 10000));
-
- RecordId loc1;
-
- { // first insert a document
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc1 = res.getValue();
- uow.commit();
- }
- }
-
- {
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(), loc1 ) );
- ASSERT( !it->isEOF() );
- ASSERT_EQ( loc1, it->getNext() );
- ASSERT( it->isEOF() );
- }
-
- {
-            // now we insert 2 docs, but commit the 2nd one first;
-            // we make sure we can't find the 2nd until the first is committed
- scoped_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
- scoped_ptr<WriteUnitOfWork> w1( new WriteUnitOfWork( t1.get() ) );
- rs->insertRecord( t1.get(), "b", 2, false );
- // do not commit yet
-
- { // create 2nd doc
- scoped_ptr<OperationContext> t2( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork w2( t2.get() );
- rs->insertRecord( t2.get(), "c", 2, false );
- w2.commit();
- }
- }
-
- { // state should be the same
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(), loc1 ) );
- ASSERT( !it->isEOF() );
- ASSERT_EQ( loc1, it->getNext() );
- ASSERT( it->isEOF() );
- }
-
- w1->commit();
- }
-
- { // now all 3 docs should be visible
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(), loc1 ) );
- ASSERT( !it->isEOF() );
- ASSERT_EQ( loc1, it->getNext() );
- ASSERT( !it->isEOF() );
- it->getNext();
- ASSERT( !it->isEOF() );
- it->getNext();
- ASSERT( it->isEOF() );
- }
- }
-
- RecordId _oplogOrderInsertOplog( OperationContext* txn,
- scoped_ptr<RecordStore>& rs,
- int inc ) {
- OpTime opTime = OpTime(5,inc);
- RocksRecordStore* rrs = dynamic_cast<RocksRecordStore*>(rs.get());
- Status status = rrs->oplogDiskLocRegister( txn, opTime );
- ASSERT_OK( status );
- BSONObj obj = BSON( "ts" << opTime );
- StatusWith<RecordId> res = rs->insertRecord( txn, obj.objdata(), obj.objsize(), false );
- ASSERT_OK( res.getStatus() );
- return res.getValue();
- }
-
- TEST(RocksRecordStoreTest, OplogOrder) {
- scoped_ptr<RocksRecordStoreHarnessHelper> harnessHelper(
- new RocksRecordStoreHarnessHelper());
- scoped_ptr<RecordStore> rs(
- harnessHelper->newCappedRecordStore("local.oplog.foo", 100000, 10000));
- {
- const RocksRecordStore* rrs = dynamic_cast<RocksRecordStore*>(rs.get());
- ASSERT( rrs->isOplog() );
- }
-
- RecordId loc1;
-
- { // first insert a document
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- loc1 = _oplogOrderInsertOplog( opCtx.get(), rs, 1 );
- uow.commit();
- }
- }
-
- {
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(), loc1 ) );
- ASSERT( !it->isEOF() );
- ASSERT_EQ( loc1, it->getNext() );
- ASSERT( it->isEOF() );
- }
-
- {
-            // now we insert 2 docs, but commit the 2nd one first;
-            // we make sure we can't find the 2nd until the first is committed
- scoped_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
- scoped_ptr<WriteUnitOfWork> w1( new WriteUnitOfWork( t1.get() ) );
- _oplogOrderInsertOplog( t1.get(), rs, 2 );
- // do not commit yet
-
- { // create 2nd doc
- scoped_ptr<OperationContext> t2( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork w2( t2.get() );
- _oplogOrderInsertOplog( t2.get(), rs, 3 );
- w2.commit();
- }
- }
-
- { // state should be the same
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(), loc1 ) );
- ASSERT( !it->isEOF() );
- ASSERT_EQ( loc1, it->getNext() );
- ASSERT( it->isEOF() );
- }
-
- w1->commit();
- }
-
- { // now all 3 docs should be visible
- scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(), loc1 ) );
- ASSERT( !it->isEOF() );
- ASSERT_EQ( loc1, it->getNext() );
- ASSERT( !it->isEOF() );
- it->getNext();
- ASSERT( !it->isEOF() );
- it->getNext();
- ASSERT( it->isEOF() );
- }
- }
-
-} // namespace mongo
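
The oplogStartHack assertions above all reduce to one lookup: the greatest RecordId at or below a target. A minimal sketch of that search over an ordered container, with a std::set of toy ids standing in for the record store's keyspace:

    #include <cassert>
    #include <set>

    // Returns the largest element <= target, or 0 when none exists.
    long long oplogStartBefore(const std::set<long long>& records, long long target) {
        auto it = records.upper_bound(target);  // first element strictly > target
        if (it == records.begin()) return 0;    // nothing at or below target
        return *--it;
    }

    int main() {
        std::set<long long> oplog = {11, 12, 22};  // stand-ins for RecordId(1,1), (1,2), (2,2)
        assert(oplogStartBefore(oplog, 1) == 0);   // nothing <=
        assert(oplogStartBefore(oplog, 21) == 12); // falls between entries
        assert(oplogStartBefore(oplog, 22) == 22); // exact match
        assert(oplogStartBefore(oplog, 23) == 22); // above the highest entry
    }
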
diff --git a/src/mongo/db/storage/rocks/rocks_recovery_unit.cpp b/src/mongo/db/storage/rocks/rocks_recovery_unit.cpp
deleted file mode 100644
index c56e7f2e44a..00000000000
--- a/src/mongo/db/storage/rocks/rocks_recovery_unit.cpp
+++ /dev/null
@@ -1,332 +0,0 @@
-/**
-* Copyright (C) 2014 MongoDB Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects for
-* all of the code used other than as permitted herein. If you modify file(s)
-* with this exception, you may extend this exception to your version of the
-* file(s), but you are not obligated to do so. If you do not wish to do so,
-* delete this exception statement from your version. If you delete this
-* exception statement from all source files in the program, then also delete
-* it in the license file.
-*/
-
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
-
-#include "mongo/platform/basic.h"
-#include "mongo/platform/endian.h"
-
-#include "mongo/db/storage/rocks/rocks_recovery_unit.h"
-
-#include <rocksdb/comparator.h>
-#include <rocksdb/db.h>
-#include <rocksdb/iterator.h>
-#include <rocksdb/slice.h>
-#include <rocksdb/options.h>
-#include <rocksdb/write_batch.h>
-#include <rocksdb/utilities/write_batch_with_index.h>
-
-#include "mongo/base/checked_cast.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/rocks/rocks_transaction.h"
-#include "mongo/db/storage/rocks/rocks_util.h"
-#include "mongo/util/log.h"
-
-namespace mongo {
- namespace {
- class PrefixStrippingIterator : public rocksdb::Iterator {
- public:
- // baseIterator is consumed
- PrefixStrippingIterator(std::string prefix, Iterator* baseIterator)
- : _prefix(std::move(prefix)),
- _prefixSlice(_prefix.data(), _prefix.size()),
- _baseIterator(baseIterator) {}
-
- virtual bool Valid() const {
- return _baseIterator->Valid() && _baseIterator->key().starts_with(_prefixSlice);
- }
-
- virtual void SeekToFirst() { _baseIterator->Seek(_prefixSlice); }
- virtual void SeekToLast() {
-            std::string nextPrefix = rocksGetNextPrefix(_prefix);
- _baseIterator->Seek(nextPrefix);
- if (!_baseIterator->Valid()) {
- _baseIterator->SeekToLast();
- }
- if (_baseIterator->Valid() && !_baseIterator->key().starts_with(_prefixSlice)) {
- _baseIterator->Prev();
- }
- }
-
- virtual void Seek(const rocksdb::Slice& target) {
- std::unique_ptr<char[]> buffer(new char[_prefix.size() + target.size()]);
- memcpy(buffer.get(), _prefix.data(), _prefix.size());
- memcpy(buffer.get() + _prefix.size(), target.data(), target.size());
- _baseIterator->Seek(rocksdb::Slice(buffer.get(), _prefix.size() + target.size()));
- }
-
- virtual void Next() { _baseIterator->Next(); }
- virtual void Prev() { _baseIterator->Prev(); }
-
- virtual rocksdb::Slice key() const {
- rocksdb::Slice strippedKey = _baseIterator->key();
- strippedKey.remove_prefix(_prefix.size());
- return strippedKey;
- }
- virtual rocksdb::Slice value() const { return _baseIterator->value(); }
- virtual rocksdb::Status status() const { return _baseIterator->status(); }
-
- private:
- std::string _prefix;
- rocksdb::Slice _prefixSlice;
- std::unique_ptr<Iterator> _baseIterator;
- };
-
- } // anonymous namespace
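
SeekToLast() above works by seeking to the lexicographically next prefix and stepping back one entry. The same idea sketched over a std::map keyspace; unlike rocksGetNextPrefix(), this toy version assumes the no-overflow case where the last prefix byte can simply be incremented:

    #include <cassert>
    #include <map>
    #include <string>

    std::string lastKeyWithPrefix(const std::map<std::string, std::string>& db,
                                  const std::string& prefix) {
        std::string next = prefix;
        next.back()++;                    // next prefix (assumes no 0xFF overflow)
        auto it = db.lower_bound(next);   // first key at or past the next prefix
        if (it == db.begin()) return {};  // no key sorts below the next prefix
        --it;                             // candidate: last key before the next prefix
        return it->first.compare(0, prefix.size(), prefix) == 0 ? it->first
                                                                : std::string{};
    }

    int main() {
        std::map<std::string, std::string> db = {{"a/1", ""}, {"a/9", ""}, {"b/5", ""}};
        assert(lastKeyWithPrefix(db, "a/") == "a/9");
        assert(lastKeyWithPrefix(db, "c/") == "");
    }
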
-
- std::atomic<int> RocksRecoveryUnit::_totalLiveRecoveryUnits(0);
-
- RocksRecoveryUnit::RocksRecoveryUnit(RocksTransactionEngine* transactionEngine, rocksdb::DB* db,
- bool durable)
- : _transactionEngine(transactionEngine),
- _db(db),
- _durable(durable),
- _transaction(transactionEngine),
- _writeBatch(),
- _snapshot(NULL),
- _depth(0),
- _myTransactionCount(1) {
- RocksRecoveryUnit::_totalLiveRecoveryUnits.fetch_add(1, std::memory_order_relaxed);
- }
-
- RocksRecoveryUnit::~RocksRecoveryUnit() {
- _abort();
- RocksRecoveryUnit::_totalLiveRecoveryUnits.fetch_sub(1, std::memory_order_relaxed);
- }
-
- void RocksRecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
- _depth++;
- }
-
- void RocksRecoveryUnit::commitUnitOfWork() {
- if (_depth > 1) {
- return; // only outermost gets committed.
- }
-
- if (_writeBatch) {
- _commit();
- }
-
- try {
- for (Changes::const_iterator it = _changes.begin(), end = _changes.end(); it != end; ++it) {
- (*it)->commit();
- }
- _changes.clear();
- }
- catch (...) {
- std::terminate();
- }
-
- _releaseSnapshot();
- }
-
- void RocksRecoveryUnit::endUnitOfWork() {
- _depth--;
- if (_depth == 0) {
- _abort();
- }
- }
-
- bool RocksRecoveryUnit::awaitCommit() {
- // TODO
- return true;
- }
-
- void RocksRecoveryUnit::commitAndRestart() {
- invariant( _depth == 0 );
- commitUnitOfWork();
- }
-
- // lazily initialized because Recovery Units are sometimes initialized just for reading,
- // which does not require write batches
- rocksdb::WriteBatchWithIndex* RocksRecoveryUnit::writeBatch() {
- if (!_writeBatch) {
- // this assumes that default column family uses default comparator. change this if you
- // change default column family's comparator
- _writeBatch.reset(
- new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0, true));
- }
-
- return _writeBatch.get();
- }
-
- void RocksRecoveryUnit::setOplogReadTill(const RecordId& record) { _oplogReadTill = record; }
-
- void RocksRecoveryUnit::registerChange(Change* change) { _changes.push_back(change); }
-
- SnapshotId RocksRecoveryUnit::getSnapshotId() const { return SnapshotId(_myTransactionCount); }
-
- void RocksRecoveryUnit::_releaseSnapshot() {
- if (_snapshot) {
- _db->ReleaseSnapshot(_snapshot);
- _snapshot = nullptr;
- }
- _myTransactionCount++;
- }
-
- void RocksRecoveryUnit::_commit() {
- invariant(_writeBatch);
-        for (const auto& pair : _deltaCounters) {
- auto& counter = pair.second;
- counter._value->fetch_add(counter._delta, std::memory_order::memory_order_relaxed);
- long long newValue = counter._value->load(std::memory_order::memory_order_relaxed);
-            int64_t littleEndian = static_cast<int64_t>(endian::nativeToLittle(newValue));
- const char* nr_ptr = reinterpret_cast<const char*>(&littleEndian);
- writeBatch()->Put(pair.first, rocksdb::Slice(nr_ptr, sizeof(littleEndian)));
- }
-
- if (_writeBatch->GetWriteBatch()->Count() != 0) {
-            // Order of operations here is important. It needs to be synchronized with
-            // _transaction.recordSnapshotId() and _db->GetSnapshot().
- rocksdb::WriteOptions writeOptions;
- writeOptions.disableWAL = !_durable;
-            auto status = _db->Write(writeOptions, _writeBatch->GetWriteBatch());
- if (!status.ok()) {
- log() << "uh oh: " << status.ToString();
- invariant(!"rocks write batch commit failed");
- }
- _transaction.commit();
- }
- _deltaCounters.clear();
- _writeBatch.reset();
- }
-
- void RocksRecoveryUnit::_abort() {
- try {
- for (Changes::const_reverse_iterator it = _changes.rbegin(), end = _changes.rend();
- it != end; ++it) {
- Change* change = *it;
- LOG(2) << "CUSTOM ROLLBACK " << demangleName(typeid(*change));
- change->rollback();
- }
- _changes.clear();
- }
- catch (...) {
- std::terminate();
- }
-
- _transaction.abort();
- _deltaCounters.clear();
- _writeBatch.reset();
-
- _releaseSnapshot();
- }
-
- const rocksdb::Snapshot* RocksRecoveryUnit::snapshot() {
- if ( !_snapshot ) {
- // Order of operations here is important. It needs to be synchronized with
- // _db->Write() and _transaction.commit()
- _transaction.recordSnapshotId();
- _snapshot = _db->GetSnapshot();
- }
-
- return _snapshot;
- }
-
- rocksdb::Status RocksRecoveryUnit::Get(const rocksdb::Slice& key, std::string* value) {
- if (_writeBatch && _writeBatch->GetWriteBatch()->Count() > 0) {
- boost::scoped_ptr<rocksdb::WBWIIterator> wb_iterator(_writeBatch->NewIterator());
- wb_iterator->Seek(key);
- if (wb_iterator->Valid() && wb_iterator->Entry().key == key) {
- const auto& entry = wb_iterator->Entry();
- if (entry.type == rocksdb::WriteType::kDeleteRecord) {
- return rocksdb::Status::NotFound();
- }
- // TODO avoid double copy
- *value = std::string(entry.value.data(), entry.value.size());
- return rocksdb::Status::OK();
- }
- }
- rocksdb::ReadOptions options;
- options.snapshot = snapshot();
- return _db->Get(options, key, value);
- }
-
- rocksdb::Iterator* RocksRecoveryUnit::NewIterator(std::string prefix) {
- rocksdb::ReadOptions options;
- options.snapshot = snapshot();
- auto iterator = _db->NewIterator(options);
- if (_writeBatch && _writeBatch->GetWriteBatch()->Count() > 0) {
- iterator = _writeBatch->NewIteratorWithBase(iterator);
- }
- return new PrefixStrippingIterator(std::move(prefix), iterator);
- }
-
- rocksdb::Iterator* RocksRecoveryUnit::NewIteratorNoSnapshot(rocksdb::DB* db,
- std::string prefix) {
- auto iterator = db->NewIterator(rocksdb::ReadOptions());
- return new PrefixStrippingIterator(std::move(prefix), iterator);
- }
-
- void RocksRecoveryUnit::incrementCounter(const rocksdb::Slice& counterKey,
- std::atomic<long long>* counter, long long delta) {
- if (delta == 0) {
- return;
- }
-
- auto pair = _deltaCounters.find(counterKey.ToString());
- if (pair == _deltaCounters.end()) {
- _deltaCounters[counterKey.ToString()] =
- mongo::RocksRecoveryUnit::Counter(counter, delta);
- } else {
- pair->second._delta += delta;
- }
- }
-
- long long RocksRecoveryUnit::getDeltaCounter(const rocksdb::Slice& counterKey) {
- auto counter = _deltaCounters.find(counterKey.ToString());
- if (counter == _deltaCounters.end()) {
- return 0;
- } else {
- return counter->second._delta;
- }
- }
-
- long long RocksRecoveryUnit::getCounterValue(rocksdb::DB* db, const rocksdb::Slice counterKey) {
- std::string value;
- auto s = db->Get(rocksdb::ReadOptions(), counterKey, &value);
- if (s.IsNotFound()) {
- return 0;
- } else if (!s.ok()) {
- log() << "Counter get failed " << s.ToString();
- invariant(!"Counter get failed");
- }
-
- int64_t ret;
- invariant(sizeof(ret) == value.size());
- memcpy(&ret, value.data(), sizeof(ret));
- // we store counters in little endian
- return static_cast<long long>(endian::littleToNative(ret));
- }
-
- RocksRecoveryUnit* RocksRecoveryUnit::getRocksRecoveryUnit(OperationContext* opCtx) {
- return checked_cast<RocksRecoveryUnit*>(opCtx->recoveryUnit());
- }
-
-} // namespace mongo
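
_commit() above persists each counter as a fixed-width little-endian int64 so that getCounterValue() can memcpy it back on any platform. A round-trip sketch of that encoding, using explicit shifts in place of mongo/platform/endian.h:

    #include <cassert>
    #include <cstdint>
    #include <string>

    std::string encodeCounter(int64_t value) {
        char buf[sizeof(value)];
        for (size_t i = 0; i < sizeof(value); ++i)  // least significant byte first
            buf[i] = static_cast<char>((static_cast<uint64_t>(value) >> (8 * i)) & 0xff);
        return std::string(buf, sizeof(buf));       // what Put() writes
    }

    int64_t decodeCounter(const std::string& value) {
        assert(value.size() == sizeof(int64_t));    // mirrors the invariant in getCounterValue
        uint64_t out = 0;
        for (size_t i = 0; i < sizeof(out); ++i)
            out |= static_cast<uint64_t>(static_cast<unsigned char>(value[i])) << (8 * i);
        return static_cast<int64_t>(out);
    }

    int main() {
        assert(decodeCounter(encodeCounter(1234567890123LL)) == 1234567890123LL);
    }
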
diff --git a/src/mongo/db/storage/rocks/rocks_recovery_unit.h b/src/mongo/db/storage/rocks/rocks_recovery_unit.h
deleted file mode 100644
index 280e4b63f1e..00000000000
--- a/src/mongo/db/storage/rocks/rocks_recovery_unit.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/**
-* Copyright (C) 2014 MongoDB Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects for
-* all of the code used other than as permitted herein. If you modify file(s)
-* with this exception, you may extend this exception to your version of the
-* file(s), but you are not obligated to do so. If you do not wish to do so,
-* delete this exception statement from your version. If you delete this
-* exception statement from all source files in the program, then also delete
-* it in the license file.
-*/
-
-#pragma once
-
-#include <atomic>
-#include <map>
-#include <stack>
-#include <string>
-#include <vector>
-#include <unordered_map>
-
-#include <boost/scoped_ptr.hpp>
-#include <boost/shared_ptr.hpp>
-
-#include "mongo/base/disallow_copying.h"
-#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/db/record_id.h"
-#include "mongo/db/storage/recovery_unit.h"
-#include "mongo/db/storage/rocks/rocks_transaction.h"
-
-namespace rocksdb {
- class DB;
- class Snapshot;
- class WriteBatchWithIndex;
- class Comparator;
- class Status;
- class Slice;
- class Iterator;
-}
-
-namespace mongo {
-
- class OperationContext;
-
- class RocksRecoveryUnit : public RecoveryUnit {
- MONGO_DISALLOW_COPYING(RocksRecoveryUnit);
- public:
- RocksRecoveryUnit(RocksTransactionEngine* transactionEngine, rocksdb::DB* db, bool durable);
- virtual ~RocksRecoveryUnit();
-
- virtual void beginUnitOfWork(OperationContext* opCtx);
- virtual void commitUnitOfWork();
-
- virtual void endUnitOfWork();
-
- virtual bool awaitCommit();
-
- virtual void commitAndRestart();
-
- virtual void* writingPtr(void* data, size_t len) { invariant(!"don't call writingPtr"); }
-
- virtual void registerChange(Change* change);
-
- virtual void setRollbackWritesDisabled() {}
-
- virtual SnapshotId getSnapshotId() const;
-
- // local api
-
- rocksdb::WriteBatchWithIndex* writeBatch();
-
- const rocksdb::Snapshot* snapshot();
- bool hasSnapshot() { return _snapshot != nullptr; }
-
- RocksTransaction* transaction() { return &_transaction; }
-
- rocksdb::Status Get(const rocksdb::Slice& key, std::string* value);
-
- rocksdb::Iterator* NewIterator(std::string prefix);
-
- static rocksdb::Iterator* NewIteratorNoSnapshot(rocksdb::DB* db, std::string prefix);
-
- void incrementCounter(const rocksdb::Slice& counterKey,
- std::atomic<long long>* counter, long long delta);
-
- long long getDeltaCounter(const rocksdb::Slice& counterKey);
-
- static long long getCounterValue(rocksdb::DB* db, const rocksdb::Slice counterKey);
-
- void setOplogReadTill(const RecordId& loc);
- RecordId getOplogReadTill() const { return _oplogReadTill; }
-
- RocksRecoveryUnit* newRocksRecoveryUnit() {
- return new RocksRecoveryUnit(_transactionEngine, _db, _durable);
- }
-
- struct Counter {
- std::atomic<long long>* _value;
- long long _delta;
- Counter() : Counter(nullptr, 0) {}
- Counter(std::atomic<long long>* value, long long delta) : _value(value), _delta(delta) {}
- };
-
- typedef std::unordered_map<std::string, Counter> CounterMap;
-
- static RocksRecoveryUnit* getRocksRecoveryUnit(OperationContext* opCtx);
-
- static int getTotalLiveRecoveryUnits() { return _totalLiveRecoveryUnits.load(); }
-
- private:
- void _releaseSnapshot();
-
- void _commit();
-
- void _abort();
- RocksTransactionEngine* _transactionEngine; // not owned
- rocksdb::DB* _db; // not owned
-
- const bool _durable;
-
- RocksTransaction _transaction;
-
- boost::scoped_ptr<rocksdb::WriteBatchWithIndex> _writeBatch; // owned
-
- // bare because we need to call ReleaseSnapshot when we're done with this
- const rocksdb::Snapshot* _snapshot; // owned
-
- CounterMap _deltaCounters;
-
- typedef OwnedPointerVector<Change> Changes;
- Changes _changes;
-
- int _depth;
- uint64_t _myTransactionCount;
-
- RecordId _oplogReadTill;
-
- static std::atomic<int> _totalLiveRecoveryUnits;
- };
-
-} // namespace mongo
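
The _depth counter above gives units of work simple nesting semantics: only the outermost commit flushes, and dropping back to depth zero without a commit rolls everything back. A toy model of that contract, assuming the begin/commit/end call pattern used by WriteUnitOfWork (the int changes stand in for registered Change objects):

    #include <cassert>
    #include <vector>

    class NestedUnitOfWork {
    public:
        void begin() { ++_depth; }
        void commit() {
            if (_depth > 1) return;  // inner commits are no-ops
            _committed = true;       // stand-in for flushing the write batch
            _pending.clear();
        }
        void end() {
            if (--_depth == 0) _pending.clear();  // stand-in for Change::rollback()
        }
        void registerChange(int c) { _pending.push_back(c); }
        bool committed() const { return _committed; }

    private:
        int _depth = 0;
        bool _committed = false;
        std::vector<int> _pending;
    };

    int main() {
        NestedUnitOfWork uow;
        uow.begin();
        uow.begin();            // nested unit of work
        uow.registerChange(1);
        uow.commit();           // no-op: depth is still 2
        uow.end();
        assert(!uow.committed());
        uow.commit();           // outermost: actually commits
        uow.end();
        assert(uow.committed());
    }
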
diff --git a/src/mongo/db/storage/rocks/rocks_server_status.cpp b/src/mongo/db/storage/rocks/rocks_server_status.cpp
deleted file mode 100644
index 945369d93a3..00000000000
--- a/src/mongo/db/storage/rocks/rocks_server_status.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include <sstream>
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/rocks/rocks_server_status.h"
-#include "mongo/db/storage/rocks/rocks_recovery_unit.h"
-
-#include "boost/scoped_ptr.hpp"
-
-#include <rocksdb/db.h>
-
-#include "mongo/base/checked_cast.h"
-#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/storage/rocks/rocks_engine.h"
-#include "mongo/util/assert_util.h"
-#include "mongo/util/scopeguard.h"
-
-namespace mongo {
- using std::string;
-
- namespace {
- std::string PrettyPrintBytes(size_t bytes) {
- if (bytes < (16 << 10)) {
- return std::to_string(bytes) + "B";
- } else if (bytes < (16 << 20)) {
- return std::to_string(bytes >> 10) + "KB";
- } else if (bytes < (16LL << 30)) {
- return std::to_string(bytes >> 20) + "MB";
- } else {
- return std::to_string(bytes >> 30) + "GB";
- }
- }
- } // namespace
-
- RocksServerStatusSection::RocksServerStatusSection(RocksEngine* engine)
- : ServerStatusSection("RocksDB"), _engine(engine) {}
-
- bool RocksServerStatusSection::includeByDefault() const { return true; }
-
- BSONObj RocksServerStatusSection::generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
-
- BSONObjBuilder bob;
-
- // if the second is true, that means that we pass the value through PrettyPrintBytes
- std::vector<std::pair<std::string, bool>> properties = {
- {"stats", false},
- {"num-immutable-mem-table", false},
- {"mem-table-flush-pending", false},
- {"compaction-pending", false},
- {"background-errors", false},
- {"cur-size-active-mem-table", true},
- {"cur-size-all-mem-tables", true},
- {"num-entries-active-mem-table", false},
- {"num-entries-imm-mem-tables", false},
- {"estimate-table-readers-mem", true},
- {"num-snapshots", false},
- {"oldest-snapshot-time", false},
- {"num-live-versions", false}};
- for (auto const& property : properties) {
- std::string statsString;
- if (!_engine->getDB()->GetProperty("rocksdb." + property.first, &statsString)) {
- statsString = "<error> unable to retrieve statistics";
- }
- if (property.first == "stats") {
- // special casing because we want to turn it into array
- BSONArrayBuilder a;
- std::stringstream ss(statsString);
- std::string line;
- while (std::getline(ss, line)) {
- a.append(line);
- }
- bob.appendArray(property.first, a.arr());
- } else if (property.second) {
- bob.append(property.first, PrettyPrintBytes(std::stoll(statsString)));
- } else {
- bob.append(property.first, statsString);
- }
- }
- bob.append("total-live-recovery-units", RocksRecoveryUnit::getTotalLiveRecoveryUnits());
- bob.append("block-cache-usage", PrettyPrintBytes(_engine->getBlockCacheUsage()));
-
- return bob.obj();
- }
-
-} // namespace mongo
-
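
PrettyPrintBytes() above only moves to a larger unit once a value exceeds 16 of that unit, so small values keep full precision. A quick check of the cutovers, with a local copy of the function from the section above:

    #include <cassert>
    #include <cstddef>
    #include <string>

    // Local copy of PrettyPrintBytes as defined in rocks_server_status.cpp above.
    std::string PrettyPrintBytes(size_t bytes) {
        if (bytes < (16 << 10)) return std::to_string(bytes) + "B";
        if (bytes < (16 << 20)) return std::to_string(bytes >> 10) + "KB";
        if (bytes < (16ULL << 30)) return std::to_string(bytes >> 20) + "MB";
        return std::to_string(bytes >> 30) + "GB";
    }

    int main() {
        assert(PrettyPrintBytes(512) == "512B");
        assert(PrettyPrintBytes(1 << 20) == "1024KB");    // still under the 16MB cutover
        assert(PrettyPrintBytes(1ULL << 30) == "1024MB"); // still under the 16GB cutover
        assert(PrettyPrintBytes(32ULL << 30) == "32GB");
    }
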
diff --git a/src/mongo/db/storage/rocks/rocks_server_status.h b/src/mongo/db/storage/rocks/rocks_server_status.h
deleted file mode 100644
index c07fc3a644b..00000000000
--- a/src/mongo/db/storage/rocks/rocks_server_status.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/db/commands/server_status.h"
-
-namespace mongo {
-
- class RocksEngine;
-
- /**
- * Adds "RocksDB" to the results of db.serverStatus().
- */
- class RocksServerStatusSection : public ServerStatusSection {
- public:
- RocksServerStatusSection(RocksEngine* engine);
- virtual bool includeByDefault() const;
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const;
-
- private:
- RocksEngine* _engine;
- };
-
-} // namespace mongo
diff --git a/src/mongo/db/storage/rocks/rocks_transaction.cpp b/src/mongo/db/storage/rocks/rocks_transaction.cpp
deleted file mode 100644
index fd2a9ca6039..00000000000
--- a/src/mongo/db/storage/rocks/rocks_transaction.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/rocks/rocks_transaction.h"
-
-#include <atomic>
-#include <map>
-#include <memory>
-#include <string>
-
-#include <boost/thread/locks.hpp>
-
-// for invariant()
-#include "mongo/util/assert_util.h"
-
-namespace mongo {
- RocksTransactionEngine::RocksTransactionEngine() : _latestSnapshotId(1), _nextTransactionId(1) {}
-
- std::list<uint64_t>::iterator RocksTransactionEngine::_getLatestSnapshotId_inlock() {
- return _activeSnapshots.insert(_activeSnapshots.end(), _latestSnapshotId);
- }
-
- bool RocksTransactionEngine::_isKeyCommittedAfterSnapshot_inlock(const std::string& key,
- uint64_t snapshotId) {
- auto iter = _keyInfo.find(key);
- return iter != _keyInfo.end() && iter->second.first > snapshotId;
- }
-
- void RocksTransactionEngine::_registerCommittedKey_inlock(const std::string& key,
- uint64_t newSnapshotId) {
- auto iter = _keyInfo.find(key);
- if (iter != _keyInfo.end()) {
- _keysSortedBySnapshot.erase(iter->second.second);
- _keyInfo.erase(iter);
- }
-
- auto listIter = _keysSortedBySnapshot.insert(_keysSortedBySnapshot.end(), {key, newSnapshotId});
- _keyInfo.insert({key, {newSnapshotId, listIter}});
- }
-
- void RocksTransactionEngine::_cleanUpKeysCommittedBeforeSnapshot_inlock(uint64_t snapshotId) {
- while (!_keysSortedBySnapshot.empty() && _keysSortedBySnapshot.begin()->second <= snapshotId) {
- auto keyInfoIter = _keyInfo.find(_keysSortedBySnapshot.begin()->first);
- invariant(keyInfoIter != _keyInfo.end());
- _keyInfo.erase(keyInfoIter);
- _keysSortedBySnapshot.pop_front();
- }
- }
-
- void RocksTransactionEngine::_cleanupSnapshot_inlock(
- const std::list<uint64_t>::iterator& snapshotIter) {
- bool needCleanup = _activeSnapshots.begin() == snapshotIter;
- _activeSnapshots.erase(snapshotIter);
- if (needCleanup) {
- _cleanUpKeysCommittedBeforeSnapshot_inlock(_activeSnapshots.empty()
- ? std::numeric_limits<uint64_t>::max()
- : *_activeSnapshots.begin());
- }
- }
-
- void RocksTransaction::commit() {
- if (_writtenKeys.empty()) {
- return;
- }
- uint64_t newSnapshotId = 0;
- {
- boost::lock_guard<boost::mutex> lk(_transactionEngine->_lock);
- for (const auto& key : _writtenKeys) {
- invariant(
- !_transactionEngine->_isKeyCommittedAfterSnapshot_inlock(key, _snapshotId));
- invariant(_transactionEngine->_uncommittedTransactionId[key] == _transactionId);
- _transactionEngine->_uncommittedTransactionId.erase(key);
- }
- newSnapshotId = _transactionEngine->_latestSnapshotId + 1;
- for (const auto& key : _writtenKeys) {
- _transactionEngine->_registerCommittedKey_inlock(key, newSnapshotId);
- }
- _cleanup_inlock();
- _transactionEngine->_latestSnapshotId = newSnapshotId;
- }
- // cleanup
- _writtenKeys.clear();
- }
-
- bool RocksTransaction::registerWrite(const std::string& key) {
- boost::lock_guard<boost::mutex> lk(_transactionEngine->_lock);
- if (_transactionEngine->_isKeyCommittedAfterSnapshot_inlock(key, _snapshotId)) {
- // write-committed write conflict
- return false;
- }
- auto uncommittedTransactionIter = _transactionEngine->_uncommittedTransactionId.find(key);
- if (uncommittedTransactionIter != _transactionEngine->_uncommittedTransactionId.end() &&
- uncommittedTransactionIter->second != _transactionId) {
- // write-uncommitted write conflict
- return false;
- }
- _writtenKeys.insert(key);
- _transactionEngine->_uncommittedTransactionId[key] = _transactionId;
- return true;
- }
-
- void RocksTransaction::abort() {
- if (_writtenKeys.empty() && !_snapshotInitialized) {
- return;
- }
- {
- boost::lock_guard<boost::mutex> lk(_transactionEngine->_lock);
- for (const auto& key : _writtenKeys) {
- _transactionEngine->_uncommittedTransactionId.erase(key);
- }
- _cleanup_inlock();
- }
- _writtenKeys.clear();
- }
-
- void RocksTransaction::recordSnapshotId() {
- {
- boost::lock_guard<boost::mutex> lk(_transactionEngine->_lock);
- _cleanup_inlock();
- _activeSnapshotsIter = _transactionEngine->_getLatestSnapshotId_inlock();
- }
- _snapshotId = *_activeSnapshotsIter;
- _snapshotInitialized = true;
- }
-
- void RocksTransaction::_cleanup_inlock() {
- if (_snapshotInitialized) {
- _transactionEngine->_cleanupSnapshot_inlock(_activeSnapshotsIter);
- _snapshotInitialized = false;
- _snapshotId = std::numeric_limits<uint64_t>::max();
- }
- }
-} // namespace mongo
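
Taken together, registerWrite(), commit(), and abort() implement first-writer-wins optimistic concurrency. A minimal usage sketch, assuming the classes as declared in rocks_transaction.h: two transactions race on one key, and the loser learns about it at registerWrite() time.

    #include <cassert>

    #include "mongo/db/storage/rocks/rocks_transaction.h"

    void conflictExample() {
        mongo::RocksTransactionEngine engine;
        mongo::RocksTransaction t1(&engine);
        mongo::RocksTransaction t2(&engine);

        t1.recordSnapshotId();            // both observe the same snapshot
        t2.recordSnapshotId();

        assert(t1.registerWrite("key"));  // t1 claims the key
        assert(!t2.registerWrite("key")); // write-uncommitted conflict for t2

        t1.commit();                      // publishes the key under a new snapshot id
        t2.abort();                       // t2 must retry from a fresh snapshot
    }
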
diff --git a/src/mongo/db/storage/rocks/rocks_transaction.h b/src/mongo/db/storage/rocks/rocks_transaction.h
deleted file mode 100644
index 880677a1e3b..00000000000
--- a/src/mongo/db/storage/rocks/rocks_transaction.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <atomic>
-#include <set>
-#include <unordered_map>
-#include <memory>
-#include <string>
-#include <list>
-
-#include <boost/thread/mutex.hpp>
-
-namespace mongo {
- class RocksTransaction;
-
- class RocksTransactionEngine {
- public:
- RocksTransactionEngine();
-
- private:
- // REQUIRES: transaction engine lock locked
- std::list<uint64_t>::iterator _getLatestSnapshotId_inlock();
-
- // REQUIRES: transaction engine lock locked
- // Cleans up the snapshot from the _activeSnapshots list
- void _cleanupSnapshot_inlock(const std::list<uint64_t>::iterator& snapshotIter);
-
- uint64_t _getNextTransactionId() {
- return _nextTransactionId.fetch_add(1);
- }
-
- // returns true if the key was committed after the snapshotId, thus causing a write
- // conflict
- // REQUIRES: transaction engine lock locked
- bool _isKeyCommittedAfterSnapshot_inlock(const std::string& key, uint64_t snapshotId);
-
- // REQUIRES: transaction engine lock locked
- void _registerCommittedKey_inlock(const std::string& key, uint64_t newSnapshotId);
-
- // REQUIRES: transaction engine lock locked
- void _cleanUpKeysCommittedBeforeSnapshot_inlock(uint64_t snapshotId);
-
- friend class RocksTransaction;
- uint64_t _latestSnapshotId;
- std::atomic<uint64_t> _nextTransactionId;
- // Lock when mutating state here
- boost::mutex _lock;
-
- // The following data structures keep information about when were the keys committed.
- // They can answer the following questions:
- // * Which stored committed key has the earliest snapshot
- // * When was a certain key committed
- // _keysSortedBySnapshot is a list of {key, sequence_id} and it's sorted by the sequence_id.
- // Committing keys are always monotonically increasing, so to keep it sorted we just need to
- // push to the end.
- // _keyInfo is a map from the key to the two-part information about the key:
- // * snapshot ID of the last commit to this key
- // * an iterator pointing to the corresponding entry in _keysSortedBySnapshot. This is used
- // to update the list at the same time as we update the _keyInfo
- // TODO optimize these structures to store only one key instead of two
- typedef std::list<std::pair<std::string, uint64_t>> KeysSortedBySnapshotList;
- typedef std::list<std::pair<std::string, uint64_t>>::iterator KeysSortedBySnapshotListIter;
- KeysSortedBySnapshotList _keysSortedBySnapshot;
- // map of key -> pair{seq_id, pointer to corresponding _keysSortedBySnapshot}
- std::unordered_map<std::string, std::pair<uint64_t, KeysSortedBySnapshotListIter>> _keyInfo;
-
- std::unordered_map<std::string, uint64_t> _uncommittedTransactionId;
-
- // this list is sorted
- std::list<uint64_t> _activeSnapshots;
- };
-
- class RocksTransaction {
- public:
- RocksTransaction(RocksTransactionEngine* transactionEngine)
- : _snapshotInitialized(false),
- _snapshotId(std::numeric_limits<uint64_t>::max()),
- _transactionId(transactionEngine->_getNextTransactionId()),
- _transactionEngine(transactionEngine) {}
-
- ~RocksTransaction() { abort(); }
-
- // returns true if OK
- // returns false on conflict
- bool registerWrite(const std::string& key);
-
- void commit();
-
- void abort();
-
- void recordSnapshotId();
-
- private:
- // REQUIRES: transaction engine lock locked
- void _cleanup_inlock();
-
- friend class RocksTransactionEngine;
- bool _snapshotInitialized;
- uint64_t _snapshotId;
- std::list<uint64_t>::iterator _activeSnapshotsIter;
- uint64_t _transactionId;
- RocksTransactionEngine* _transactionEngine;
- std::set<std::string> _writtenKeys;
- };
-} // namespace mongo
diff --git a/src/mongo/db/storage/rocks/rocks_util.h b/src/mongo/db/storage/rocks/rocks_util.h
deleted file mode 100644
index f8611d60d84..00000000000
--- a/src/mongo/db/storage/rocks/rocks_util.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <string>
-
-namespace mongo {
-
- inline std::string rocksGetNextPrefix(const std::string& prefix) {
- // next prefix lexicographically, assume same length
- std::string nextPrefix(prefix);
- for (int i = static_cast<int>(nextPrefix.size()) - 1; i >= 0; --i) {
- nextPrefix[i]++;
- // if it's == 0, that means we've overflowed, so need to keep adding
- if (nextPrefix[i] != 0) {
- break;
- }
- }
- return nextPrefix;
- }
-
-} // namespace mongo
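
A short demonstration of the carry behavior above: the last byte is incremented normally, and a 0xFF byte wraps to 0x00 with the carry propagating one byte to the left.

    #include <cassert>
    #include <string>

    #include "mongo/db/storage/rocks/rocks_util.h"

    void nextPrefixExamples() {
        using mongo::rocksGetNextPrefix;
        assert(rocksGetNextPrefix("abc") == "abd");  // simple increment of the last byte
        // 0xFF wraps to 0x00 and the carry bumps 'b' to 'c'.
        assert(rocksGetNextPrefix("ab\xff") == std::string("ac\0", 3));
    }
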