author     Sulabh Mahajan <sulabh.mahajan@mongodb.com>   2018-06-08 14:34:55 +1000
committer  Sulabh Mahajan <sulabh.mahajan@mongodb.com>   2018-06-08 14:34:55 +1000
commit     e361973f0e994d7c5da603cb6436fd96f7180127 (patch)
tree       97efe6bdfd0d0bf26e1c33365d7d4682ee03ad36
parent     e317b88bb811da53f73ddb992417ae05ea8fe451 (diff)
download   mongo-e361973f0e994d7c5da603cb6436fd96f7180127.tar.gz
SERVER-33605 Mobile SE: Disable capped collections and related testing
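
The commit introduces two new test tags, requires_capped and requires_profiling, and applies them across the jstests suites so that the mobile storage engine, which supports neither capped collections nor the profiler, can skip the affected tests via --excludeWithAnyTags=requires_capped,requires_profiling (see etc/evergreen.yml below). Tests that only touch capped collections in one branch instead get a runtime check on jsTest.options().storageEngine. Both patterns, as they appear throughout the diff, are sketched below; the collection name is a made-up placeholder rather than one taken from the changed files.

    // Declarative skip: tests that depend on capped collections or the profiler
    // carry these tags, and the mobile build variants exclude them in resmoke.
    // @tags: [requires_capped, requires_profiling]

    // Runtime skip: shared tests guard their capped-collection steps instead.
    if (jsTest.options().storageEngine !== "mobile") {
        // The mobile storage engine does not support capped collections.
        db.createCollection("example_capped", {capped: true, size: 4096});
    }
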
-rw-r--r--  buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml | 3
-rw-r--r--  buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml | 3
-rw-r--r--  buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml | 3
-rw-r--r--  buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml | 3
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml | 3
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml | 3
-rw-r--r--  etc/evergreen.yml | 8
-rw-r--r--  jstests/aggregation/bugs/server3253.js | 10
-rw-r--r--  jstests/auth/auth1.js | 1
-rw-r--r--  jstests/auth/list_databases.js | 11
-rw-r--r--  jstests/auth/profile.js | 3
-rw-r--r--  jstests/auth/profile_access.js | 3
-rw-r--r--  jstests/concurrency/fsm_workloads/create_capped_collection.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js | 2
-rw-r--r--  jstests/core/apitest_db.js | 35
-rw-r--r--  jstests/core/apitest_db_profile_level.js | 40
-rw-r--r--  jstests/core/apitest_dbcollection.js | 5
-rw-r--r--  jstests/core/awaitdata_getmore_cmd.js | 1
-rw-r--r--  jstests/core/bulk_insert_capped.js | 1
-rw-r--r--  jstests/core/capped.js | 1
-rw-r--r--  jstests/core/capped1.js | 2
-rw-r--r--  jstests/core/capped5.js | 2
-rw-r--r--  jstests/core/capped6.js | 1
-rw-r--r--  jstests/core/capped_convertToCapped1.js | 4
-rw-r--r--  jstests/core/capped_empty.js | 2
-rw-r--r--  jstests/core/capped_max1.js | 2
-rw-r--r--  jstests/core/capped_queries_and_id_index.js | 1
-rw-r--r--  jstests/core/capped_update.js | 1
-rw-r--r--  jstests/core/cappeda.js | 2
-rw-r--r--  jstests/core/collation.js | 82
-rw-r--r--  jstests/core/collation_convert_to_capped.js | 4
-rw-r--r--  jstests/core/convert_to_capped.js | 4
-rw-r--r--  jstests/core/create_collection_fail_cleanup.js | 1
-rw-r--r--  jstests/core/evalb.js | 3
-rw-r--r--  jstests/core/geo_s2cursorlimitskip.js | 2
-rw-r--r--  jstests/core/getmore_cmd_maxtimems.js | 2
-rw-r--r--  jstests/core/profile1.js | 3
-rw-r--r--  jstests/core/profile2.js | 5
-rw-r--r--  jstests/core/profile3.js | 5
-rw-r--r--  jstests/core/profile_agg.js | 2
-rw-r--r--  jstests/core/profile_count.js | 2
-rw-r--r--  jstests/core/profile_delete.js | 2
-rw-r--r--  jstests/core/profile_distinct.js | 2
-rw-r--r--  jstests/core/profile_find.js | 2
-rw-r--r--  jstests/core/profile_findandmodify.js | 5
-rw-r--r--  jstests/core/profile_geonear.js | 2
-rw-r--r--  jstests/core/profile_getmore.js | 2
-rw-r--r--  jstests/core/profile_insert.js | 1
-rw-r--r--  jstests/core/profile_list_collections.js | 2
-rw-r--r--  jstests/core/profile_list_indexes.js | 2
-rw-r--r--  jstests/core/profile_mapreduce.js | 2
-rw-r--r--  jstests/core/profile_no_such_db.js | 2
-rw-r--r--  jstests/core/profile_parallel_collection_scan.js | 2
-rw-r--r--  jstests/core/profile_repair_cursor.js | 2
-rw-r--r--  jstests/core/profile_sampling.js | 2
-rw-r--r--  jstests/core/profile_update.js | 2
-rw-r--r--  jstests/core/queryoptimizera.js | 2
-rw-r--r--  jstests/core/rename.js | 2
-rw-r--r--  jstests/core/rename7.js | 3
-rw-r--r--  jstests/core/startup_log.js | 5
-rw-r--r--  jstests/core/tailable_cursor_invalidation.js | 2
-rw-r--r--  jstests/core/tailable_getmore_batch_size.js | 2
-rw-r--r--  jstests/core/tailable_skip_limit.js | 2
-rw-r--r--  jstests/libs/parallelTester.js | 2
-rw-r--r--  jstests/noPassthrough/aggregation_cursor_invalidations.js | 2
-rw-r--r--  jstests/noPassthrough/currentop_includes_await_time.js | 1
-rw-r--r--  jstests/noPassthrough/latency_includes_lock_acquisition_time.js | 1
-rw-r--r--  jstests/noPassthrough/log_find_getmore.js | 1
-rw-r--r--  jstests/noPassthrough/profile_agg_multiple_batches.js | 1
-rw-r--r--  jstests/noPassthrough/shell_appname_uri.js | 3
-rw-r--r--  jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js | 2
-rw-r--r--  jstests/noPassthrough/ttl_capped.js | 1
-rw-r--r--  jstests/noPassthrough/write_local.js | 2
-rw-r--r--  jstests/noPassthrough/yield_during_writes.js | 1
-rw-r--r--  jstests/noPassthroughWithMongod/capped4.js | 1
-rw-r--r--  jstests/noPassthroughWithMongod/capped_truncate.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/clonecollection.js | 3
-rw-r--r--  jstests/noPassthroughWithMongod/explain2.js | 3
-rw-r--r--  jstests/noPassthroughWithMongod/find_cmd.js | 17
-rw-r--r--  jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js | 1
-rw-r--r--  jstests/noPassthroughWithMongod/query_oplogreplay.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/rpc_protocols.js | 1
-rw-r--r--  jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js | 1
-rw-r--r--  src/mongo/db/db.cpp | 10
-rw-r--r--  src/mongo/db/storage/kv/kv_engine.h | 7
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp | 3
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.h | 5
-rw-r--r--  src/mongo/db/storage/mobile/mobile_kv_engine.cpp | 7
-rw-r--r--  src/mongo/db/storage/mobile/mobile_kv_engine.h | 4
-rw-r--r--  src/mongo/db/storage/storage_engine.h | 7
-rw-r--r--  src/mongo/dbtests/clienttests.cpp | 2
-rw-r--r--  src/mongo/dbtests/directclienttests.cpp | 5
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp | 10
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp | 10
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 65
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp | 20
101 files changed, 364 insertions(+), 172 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
index 6e37d3eb5d1..8eaa08c3f36 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
@@ -8,7 +8,8 @@ selector:
- jstests/core/txns/**/*.js
# The following tests fail because a certain command or functionality is not supported on
# mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/apitest_db.js # profiling.
+ - jstests/core/apitest_db.js # serverStatus output doesn't have storageEngine
+ - jstests/core/apitest_db_profile_level.js # profiling.
- jstests/core/apply_ops*.js # applyOps, SERVER-1439.
- jstests/core/capped_convertToCapped1.js # cloneCollectionAsCapped.
- jstests/core/capped_empty.js # emptycapped.
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
index 34218fa2665..69743d741d1 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
@@ -22,7 +22,8 @@ selector:
- jstests/core/system_profile.js
# The following tests fail because a certain command or functionality is not supported on
# mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/apitest_db.js # profiling.
+ - jstests/core/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/apitest_db_profile_level.js # profiling.
- jstests/core/apply_ops*.js # applyOps, SERVER-1439.
- jstests/core/capped_convertToCapped1.js # cloneCollectionAsCapped.
- jstests/core/capped_empty.js # emptycapped.
diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
index 4b86fad9420..a4ab4146143 100644
--- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
@@ -10,7 +10,8 @@ selector:
- jstests/core/txns/**/*.js
# The following tests fail because a certain command or functionality is not supported by
# mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/apitest_db.js # profiling.
+ - jstests/core/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/apitest_db_profile_level.js # profiling.
- jstests/core/apply_ops*.js # applyOps, SERVER-1439.
- jstests/core/bypass_doc_validation.js # copyDatabase
- jstests/core/capped*.js # capped collections.
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
index d2468d9026f..c9e746bf608 100644
--- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
@@ -8,7 +8,8 @@ selector:
- jstests/core/txns/**/*.js
# The following tests fail because a certain command or functionality is not supported by
# mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/apitest_db.js # profiling.
+ - jstests/core/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/apitest_db_profile_level.js # profiling.
- jstests/core/apply_ops*.js # applyOps, SERVER-1439.
- jstests/core/awaitdata_getmore_cmd.js # capped collections.
- jstests/core/bypass_doc_validation.js # copyDatabase
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml
index 7a900c2f2c2..a150217bc2b 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml
@@ -8,7 +8,8 @@ selector:
- jstests/core/txns/**/*.js
# The following tests fail because a certain command or functionality is not supported on
# mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/apitest_db.js # profiling.
+ - jstests/core/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/apitest_db_profile_level.js # profiling.
- jstests/core/apply_ops*.js # applyOps, SERVER-1439.
- jstests/core/capped6.js # captrunc.
- jstests/core/capped_convertToCapped1.js # cloneCollectionAsCapped.
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
index aa0fa6bd0d0..6be19ac20b8 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
@@ -8,7 +8,8 @@ selector:
- jstests/core/txns/**/*.js
# The following tests fail because a certain command or functionality is not supported on
# mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/apitest_db.js # profiling.
+ - jstests/core/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/apitest_db_profile_level.js # profiling.
- jstests/core/apply_ops*.js # applyOps, SERVER-1439.
- jstests/core/capped6.js # captrunc.
- jstests/core/capped_convertToCapped1.js # cloneCollectionAsCapped.
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 9b320d0c14e..45c1abb2827 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -4398,7 +4398,7 @@ tasks:
- func: "do setup"
- func: "run tests"
vars:
- resmoke_args: --suites=core --mongod=./mongoed --excludeWithAnyTags=requires_scripting,requires_auth,requires_sharding,does_not_support_stepdowns,requires_eval_command,requires_background_index,incompatible_with_embedded,incompatible_with_embedded_todo_investigate,requires_replication,SERVER-32997
+ resmoke_args: --suites=core --mongod=./mongoed --excludeWithAnyTags=requires_scripting,requires_auth,requires_sharding,does_not_support_stepdowns,requires_eval_command,requires_background_index,incompatible_with_embedded,incompatible_with_embedded_todo_investigate,requires_replication,requires_capped,requires_profiling,SERVER-32997
run_multiple_jobs: true
- <<: *task_template
@@ -11750,7 +11750,7 @@ buildvariants:
# mobile storage engine.
test_flags: >-
--storageEngine=mobile
- --excludeWithAnyTags=requires_mmapv1,requires_wiredtiger,requires_replication,requires_sharding,uses_transactions
+ --excludeWithAnyTags=requires_mmapv1,requires_wiredtiger,requires_replication,requires_sharding,uses_transactions,requires_capped,requires_profiling
--excludeWithAnyTags=SERVER-32709,SERVER-32869,SERVER-32993,SERVER-32997
compile_flags: >-
-j$(grep -c ^processor /proc/cpuinfo)
@@ -11807,7 +11807,7 @@ buildvariants:
expansions:
test_flags: >-
--storageEngine=mobile
- --excludeWithAnyTags=requires_mmapv1,requires_wiredtiger,requires_replication,requires_sharding,uses_transactions
+ --excludeWithAnyTags=requires_mmapv1,requires_wiredtiger,requires_replication,requires_sharding,uses_transactions,requires_capped,requires_profiling
--excludeWithAnyTags=SERVER-32709,SERVER-32869,SERVER-32993,SERVER-32997
compile_flags: >-
-j$(grep -c ^processor /proc/cpuinfo)
@@ -11889,7 +11889,7 @@ buildvariants:
# mobile storage engine.
test_flags: >-
--storageEngine=mobile
- --excludeWithAnyTags=requires_mmapv1,requires_wiredtiger,requires_replication,requires_sharding,uses_transactions
+ --excludeWithAnyTags=requires_mmapv1,requires_wiredtiger,requires_replication,requires_sharding,uses_transactions,requires_capped,requires_profiling
--excludeWithAnyTags=SERVER-32709,SERVER-32869,SERVER-32993,SERVER-32997
compile_env: DEVELOPER_DIR=/Applications/Xcode8.3.app
compile_flags: >-
diff --git a/jstests/aggregation/bugs/server3253.js b/jstests/aggregation/bugs/server3253.js
index a8d00d9a07b..51e355cad0b 100644
--- a/jstests/aggregation/bugs/server3253.js
+++ b/jstests/aggregation/bugs/server3253.js
@@ -91,10 +91,12 @@ test(input, [{$project: {c: {$concat: ["hello there ", "_id"]}}}], [
{_id: 3, c: "hello there _id"}
]);
-// test with capped collection
-cappedOutput.drop();
-db.createCollection(cappedOutput.getName(), {capped: true, size: 2});
-assertErrorCode(input, {$out: cappedOutput.getName()}, 17152);
+// test with capped collection, skip for mobile SE as it doesn't support capped collections.
+if (jsTest.options().storageEngine !== "mobile") {
+ cappedOutput.drop();
+ db.createCollection(cappedOutput.getName(), {capped: true, size: 2});
+ assertErrorCode(input, {$out: cappedOutput.getName()}, 17152);
+}
// ensure everything works even if input doesn't exist.
test(inputDoesntExist, [], []);
diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js
index cbf1139744f..854aaaf5610 100644
--- a/jstests/auth/auth1.js
+++ b/jstests/auth/auth1.js
@@ -1,5 +1,6 @@
// test read/write permissions
// skip this test on 32-bit platforms
+// @tags: [requires_profiling]
function setupTest() {
print("START auth1.js");
diff --git a/jstests/auth/list_databases.js b/jstests/auth/list_databases.js
index 5146328243d..830160ce332 100644
--- a/jstests/auth/list_databases.js
+++ b/jstests/auth/list_databases.js
@@ -30,13 +30,16 @@
admin.createUser({user: 'user3', pwd: 'pass', roles: readEven.concat(readWriteLow)});
admin.logout();
+ var admin_dbs = ["admin", "db0", "db1", "db2", "db3", "db4", "db5", "db6", "db7"];
+ // mobile storage engine might not have a local database
+ if (jsTest.options().storageEngine !== "mobile") {
+ admin_dbs.push("local");
+ }
+
[{user: "user1", dbs: ["db0", "db2", "db4", "db6"]},
{user: "user2", dbs: ["db0", "db1", "db2", "db3"]},
{user: "user3", dbs: ["db0", "db1", "db2", "db3", "db4", "db6"]},
- {
- user: "admin",
- dbs: ["admin", "db0", "db1", "db2", "db3", "db4", "db5", "db6", "db7", "local"]
- },
+ {user: "admin", dbs: admin_dbs},
].forEach(function(test) {
admin.auth(test.user, 'pass');
const dbs = assert.commandWorked(admin.runCommand({listDatabases: 1}));
diff --git a/jstests/auth/profile.js b/jstests/auth/profile.js
index 5e4e8b7d98b..1b63133edbc 100644
--- a/jstests/auth/profile.js
+++ b/jstests/auth/profile.js
@@ -1,4 +1,5 @@
// Check that username information gets recorded properly in profiler.
+// @tags: [requires_profiling]
var conn = MongoRunner.runMongod();
var db1 = conn.getDB("profile-a");
var db2 = db1.getSisterDB("profile-b");
@@ -49,4 +50,4 @@ assert((db1 == last.allUsers[0].db && db2 == last.allUsers[1].db) ||
db1.setProfilingLevel(0);
db1.dropDatabase();
db2.dropDatabase();
-MongoRunner.stopMongod(conn);
\ No newline at end of file
+MongoRunner.stopMongod(conn);
diff --git a/jstests/auth/profile_access.js b/jstests/auth/profile_access.js
index 2dc4f028e5f..b31f7f502e0 100644
--- a/jstests/auth/profile_access.js
+++ b/jstests/auth/profile_access.js
@@ -1,3 +1,4 @@
+// @tags: [requires_profiling]
var conn = MongoRunner.runMongod({auth: ""});
var adminDb = conn.getDB("admin");
@@ -38,4 +39,4 @@ testDb.auth('dbAdminAnyDBUser', 'password');
testDb.setProfilingLevel(0);
testDb.system.profile.drop();
assert.commandWorked(testDb.createCollection("system.profile", {capped: true, size: 1024}));
-MongoRunner.stopMongod(conn, null, {user: 'admin', pwd: 'password'});
\ No newline at end of file
+MongoRunner.stopMongod(conn, null, {user: 'admin', pwd: 'password'});
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js
index c0ec6c0e071..af78b864283 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js
@@ -5,6 +5,8 @@
*
* Repeatedly creates a capped collection. Also verifies that truncation
* occurs once the collection reaches a certain size.
+ *
+ * @tags: [requires_capped]
*/
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
index e29ed65a274..adc43f119f7 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
@@ -6,6 +6,8 @@
* Repeatedly creates a capped collection. Also verifies that truncation
* occurs once the collection reaches a certain size or contains a
* certain number of documents.
+ *
+ * @tags: [requires_capped]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
index ae4fa632c84..4b95e1d5b76 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
@@ -4,6 +4,8 @@
* create_index_background_unique_capped.js
*
* Creates multiple unique background indexes in parallel, on capped collections.
+ *
+ * @tags: [requires_capped]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
index cc3de60d3c0..1930c71d58f 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
@@ -6,6 +6,8 @@
* Creates a capped collection and then repeatedly executes the renameCollection
* command against it. The previous "to" namespace is used as the next "from"
* namespace.
+ *
+ * @tags: [requires_capped]
*/
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
index 93d52f8c251..385074cdd72 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
@@ -6,6 +6,8 @@
* Creates a capped collection and then repeatedly executes the renameCollection
* command against it, specifying a different database name in the namespace.
* The previous "to" namespace is used as the next "from" namespace.
+ *
+ * @tags: [requires_capped]
*/
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
index b1c4c156c3f..5f5715bd8ac 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
@@ -6,6 +6,8 @@
* Creates a capped collection and then repeatedly executes the renameCollection
* command against it, specifying a different database name in the namespace.
* Inserts documents into the "to" namespace and specifies dropTarget=true.
+ *
+ * @tags: [requires_capped]
*/
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
index 1d6dfd6faf4..e7aac0a9529 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
@@ -6,6 +6,8 @@
* Creates a capped collection and then repeatedly executes the renameCollection
* command against it. Inserts documents into the "to" namespace and specifies
* dropTarget=true.
+ *
+ * @tags: [requires_capped]
*/
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
diff --git a/jstests/core/apitest_db.js b/jstests/core/apitest_db.js
index 25101975240..478f0ef8f08 100644
--- a/jstests/core/apitest_db.js
+++ b/jstests/core/apitest_db.js
@@ -18,8 +18,6 @@ dd("b");
* be sure the public collection API is complete
*/
assert(db.createCollection, "createCollection");
-assert(db.getProfilingLevel, "getProfilingLevel");
-assert(db.setProfilingLevel, "setProfilingLevel");
assert(db.dbEval, "dbEval");
dd("c");
@@ -118,38 +116,5 @@ assert.docEq({storageEngine: validStorageEngineOptions},
dd("e");
-/*
- * profile level
- */
-
-// A test-specific database is used for profiler testing so as not to interfere with other tests
-// that modify profiler level, when run in parallel.
-var profileLevelDB = db.getSiblingDB("apitest_db_profile_level");
-
-profileLevelDB.setProfilingLevel(0);
-assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0");
-
-profileLevelDB.setProfilingLevel(1);
-assert(profileLevelDB.getProfilingLevel() == 1, "p1");
-
-profileLevelDB.setProfilingLevel(2);
-assert(profileLevelDB.getProfilingLevel() == 2, "p2");
-
-profileLevelDB.setProfilingLevel(0);
-assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0");
-
-dd("f");
-asserted = false;
-try {
- profileLevelDB.setProfilingLevel(10);
- assert(false);
-} catch (e) {
- asserted = true;
- assert(e.dbSetProfilingException);
-}
-assert(asserted, "should have asserted");
-
-dd("g");
-
assert.eq("foo", db.getSisterDB("foo").getName());
assert.eq("foo", db.getSiblingDB("foo").getName());
diff --git a/jstests/core/apitest_db_profile_level.js b/jstests/core/apitest_db_profile_level.js
new file mode 100644
index 00000000000..2172b4ed1cb
--- /dev/null
+++ b/jstests/core/apitest_db_profile_level.js
@@ -0,0 +1,40 @@
+/**
+ * Tests for setting of profile levels
+ * @tags: [does_not_support_stepdowns, requires_profiling]
+ */
+
+(function() {
+ 'use strict';
+
+ /*
+ * be sure the public collection API is complete
+ */
+ assert(db.getProfilingLevel, "getProfilingLevel");
+ assert(db.setProfilingLevel, "setProfilingLevel");
+
+ // A test-specific database is used for profiler testing so as not to interfere with
+ // other tests that modify profiler level, when run in parallel.
+ var profileLevelDB = db.getSiblingDB("apitest_db_profile_level");
+
+ profileLevelDB.setProfilingLevel(0);
+ assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0");
+
+ profileLevelDB.setProfilingLevel(1);
+ assert(profileLevelDB.getProfilingLevel() == 1, "p1");
+
+ profileLevelDB.setProfilingLevel(2);
+ assert(profileLevelDB.getProfilingLevel() == 2, "p2");
+
+ profileLevelDB.setProfilingLevel(0);
+ assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0");
+
+ var asserted = false;
+ try {
+ profileLevelDB.setProfilingLevel(10);
+ assert(false);
+ } catch (e) {
+ asserted = true;
+ assert(e.dbSetProfilingException);
+ }
+ assert(asserted, "should have asserted");
+})();
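
The profiling tests move in step with the capped-collection work because enabling the profiler stores its output in the capped system.profile collection, which the mobile storage engine cannot create; that is why requires_profiling effectively implies capped-collection support. A minimal shell sketch of that relationship, assuming a storage engine other than mobile and a hypothetical database name:

    var testDB = db.getSiblingDB("profile_level_example");  // hypothetical database name
    testDB.setProfilingLevel(2);                             // profile all operations
    assert.writeOK(testDB.example.insert({x: 1}));           // generate one profiled write
    assert(testDB.system.profile.stats().capped,             // profiler output lives in a capped collection
           "system.profile is expected to be capped");
    testDB.setProfilingLevel(0);                             // turn profiling back off
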
diff --git a/jstests/core/apitest_dbcollection.js b/jstests/core/apitest_dbcollection.js
index 384d4795f37..3e67b680282 100644
--- a/jstests/core/apitest_dbcollection.js
+++ b/jstests/core/apitest_dbcollection.js
@@ -4,10 +4,7 @@
* @tags: [
* requires_fastcount,
* requires_collstats,
- *
- * # indexDetails result is not correct with mobile storage engine.
- * # TODO SERVER-34579
- * incompatible_with_embedded_todo_investigate,
+ * requires_capped,
* ]
*/
diff --git a/jstests/core/awaitdata_getmore_cmd.js b/jstests/core/awaitdata_getmore_cmd.js
index 4a8bc332c86..6ea931a1523 100644
--- a/jstests/core/awaitdata_getmore_cmd.js
+++ b/jstests/core/awaitdata_getmore_cmd.js
@@ -6,6 +6,7 @@
// # routed to the primary.
// assumes_read_preference_unchanged,
// requires_getmore,
+// requires_capped,
// ]
(function() {
diff --git a/jstests/core/bulk_insert_capped.js b/jstests/core/bulk_insert_capped.js
index aaea7b6298b..50cc8f460dd 100644
--- a/jstests/core/bulk_insert_capped.js
+++ b/jstests/core/bulk_insert_capped.js
@@ -1,4 +1,5 @@
// @tags: [
+// requires_capped,
// # Cannot implicitly shard accessed collections because of collection existing when none
// # expected.
// assumes_no_implicit_collection_creation_after_drop,
diff --git a/jstests/core/capped.js b/jstests/core/capped.js
index 0b2945bba04..cb00c70cc4d 100644
--- a/jstests/core/capped.js
+++ b/jstests/core/capped.js
@@ -1,3 +1,4 @@
+// @tags: [requires_capped]
db.jstests_capped.drop();
db.createCollection("jstests_capped", {capped: true, size: 30000});
diff --git a/jstests/core/capped1.js b/jstests/core/capped1.js
index 923ee3aa668..cd4aa9c60ed 100644
--- a/jstests/core/capped1.js
+++ b/jstests/core/capped1.js
@@ -1,4 +1,4 @@
-
+// @tags: [requires_capped]
t = db.capped1;
t.drop();
diff --git a/jstests/core/capped5.js b/jstests/core/capped5.js
index 930cbabb462..0efb957ae6c 100644
--- a/jstests/core/capped5.js
+++ b/jstests/core/capped5.js
@@ -1,4 +1,4 @@
-
+// @tags: [requires_capped]
tn = "capped5";
t = db[tn];
diff --git a/jstests/core/capped6.js b/jstests/core/capped6.js
index d661b2f72fe..8d725c5e79c 100644
--- a/jstests/core/capped6.js
+++ b/jstests/core/capped6.js
@@ -7,6 +7,7 @@
// assumes_read_preference_unchanged,
// requires_non_retryable_commands,
// requires_fastcount,
+// requires_capped,
// ]
(function() {
var coll = db.capped6;
diff --git a/jstests/core/capped_convertToCapped1.js b/jstests/core/capped_convertToCapped1.js
index 62931b24a20..25137bbcf47 100644
--- a/jstests/core/capped_convertToCapped1.js
+++ b/jstests/core/capped_convertToCapped1.js
@@ -2,9 +2,7 @@
* @tags: [
* requires_non_retryable_commands,
* requires_fastcount,
- *
- * # capped collections is not available on embedded
- * incompatible_with_embedded,
+ * requires_capped,
* ]
*/
diff --git a/jstests/core/capped_empty.js b/jstests/core/capped_empty.js
index aaac7639f2a..2b10a513b4d 100644
--- a/jstests/core/capped_empty.js
+++ b/jstests/core/capped_empty.js
@@ -1,4 +1,4 @@
-// @tags: [requires_non_retryable_commands, requires_fastcount]
+// @tags: [requires_non_retryable_commands, requires_fastcount, requires_capped]
t = db.capped_empty;
t.drop();
diff --git a/jstests/core/capped_max1.js b/jstests/core/capped_max1.js
index 55c6c98569f..d26c05f644d 100644
--- a/jstests/core/capped_max1.js
+++ b/jstests/core/capped_max1.js
@@ -1,6 +1,6 @@
// Test max docs in capped collection
//
-// @tags: [requires_fastcount, requires_collstats]
+// @tags: [requires_fastcount, requires_collstats, requires_capped]
var t = db.capped_max1;
t.drop();
diff --git a/jstests/core/capped_queries_and_id_index.js b/jstests/core/capped_queries_and_id_index.js
index 45ab8187a6f..a10a4f60daf 100644
--- a/jstests/core/capped_queries_and_id_index.js
+++ b/jstests/core/capped_queries_and_id_index.js
@@ -1,4 +1,5 @@
// Tests the behavior of querying or updating a capped collection with and without an _id index.
+// @tags: [requires_capped]
(function() {
"use strict";
const coll = db.capped9;
diff --git a/jstests/core/capped_update.js b/jstests/core/capped_update.js
index 169da5affdb..47e938cb210 100644
--- a/jstests/core/capped_update.js
+++ b/jstests/core/capped_update.js
@@ -2,6 +2,7 @@
* Tests various update scenarios on capped collections:
* -- SERVER-20529: Ensure capped document sizes do not change
* -- SERVER-11983: Don't create _id field on capped updates
+ * @tags: [requires_capped]
*/
(function() {
'use strict';
diff --git a/jstests/core/cappeda.js b/jstests/core/cappeda.js
index f5c56a44e89..f506d5416b1 100644
--- a/jstests/core/cappeda.js
+++ b/jstests/core/cappeda.js
@@ -1,4 +1,4 @@
-
+// @tags: [requires_capped]
t = db.scan_capped_id;
t.drop();
diff --git a/jstests/core/collation.js b/jstests/core/collation.js
index 5a24b133de0..8458db5e4b3 100644
--- a/jstests/core/collation.js
+++ b/jstests/core/collation.js
@@ -692,18 +692,21 @@
assert.neq(null, planStage);
// Find with oplog replay should return correct results when no collation specified and
- // collection has a default collation.
- coll.drop();
- assert.commandWorked(db.createCollection(
- coll.getName(),
- {collation: {locale: "en_US", strength: 2}, capped: true, size: 16 * 1024}));
- assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 0)}));
- assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 1)}));
- assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 2)}));
- assert.eq(2,
- coll.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}})
- .addOption(DBQuery.Option.oplogReplay)
- .itcount());
+ // collection has a default collation. Skip this test for the mobile SE because it doesn't
+ // support capped collections which are needed for oplog replay.
+ if (jsTest.options().storageEngine !== "mobile") {
+ coll.drop();
+ assert.commandWorked(db.createCollection(
+ coll.getName(),
+ {collation: {locale: "en_US", strength: 2}, capped: true, size: 16 * 1024}));
+ assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 0)}));
+ assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 1)}));
+ assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 2)}));
+ assert.eq(2,
+ coll.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}})
+ .addOption(DBQuery.Option.oplogReplay)
+ .itcount());
+ }
// Find should return correct results for query containing $expr when no collation specified and
// collection has a default collation.
@@ -759,22 +762,25 @@
assert.eq(null, planStage);
// Find with oplog replay should return correct results when "simple" collation specified
- // and collection has a default collation.
- coll.drop();
- assert.commandWorked(db.createCollection(
- coll.getName(),
- {collation: {locale: "en_US", strength: 2}, capped: true, size: 16 * 1024}));
- const t0 = Timestamp(1000, 0);
- const t1 = Timestamp(1000, 1);
- const t2 = Timestamp(1000, 2);
- assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 0)}));
- assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 1)}));
- assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 2)}));
- assert.eq(0,
- coll.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}})
- .addOption(DBQuery.Option.oplogReplay)
- .collation({locale: "simple"})
- .itcount());
+ // and collection has a default collation. Skip this test for the mobile SE because it
+ // doesn't support capped collections which are needed for oplog replay.
+ if (jsTest.options().storageEngine !== "mobile") {
+ coll.drop();
+ assert.commandWorked(db.createCollection(
+ coll.getName(),
+ {collation: {locale: "en_US", strength: 2}, capped: true, size: 16 * 1024}));
+ const t0 = Timestamp(1000, 0);
+ const t1 = Timestamp(1000, 1);
+ const t2 = Timestamp(1000, 2);
+ assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 0)}));
+ assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 1)}));
+ assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 2)}));
+ assert.eq(0,
+ coll.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}})
+ .addOption(DBQuery.Option.oplogReplay)
+ .collation({locale: "simple"})
+ .itcount());
+ }
}
// Find should select compatible index when no collation specified and collection has a default
@@ -2001,16 +2007,22 @@
coll.find({_id: "foo"}).toArray(),
"query should have performed a case-insensitive match");
- assert.commandWorked(db.runCommand({
+ var cloneCollOutput = db.runCommand({
cloneCollectionAsCapped: coll.getName(),
toCollection: clonedColl.getName(),
size: 4096
- }));
- const clonedCollectionInfos = db.getCollectionInfos({name: clonedColl.getName()});
- assert.eq(clonedCollectionInfos.length, 1, tojson(clonedCollectionInfos));
- assert.eq(originalCollectionInfos[0].options.collation,
- clonedCollectionInfos[0].options.collation);
- assert.eq([{_id: "FOO"}], clonedColl.find({_id: "foo"}).toArray());
+ });
+ if (jsTest.options().storageEngine === "mobile") {
+ // Capped collections are not supported by the mobile storage engine
+ assert.commandFailedWithCode(cloneCollOutput, ErrorCodes.InvalidOptions);
+ } else {
+ assert.commandWorked(cloneCollOutput);
+ const clonedCollectionInfos = db.getCollectionInfos({name: clonedColl.getName()});
+ assert.eq(clonedCollectionInfos.length, 1, tojson(clonedCollectionInfos));
+ assert.eq(originalCollectionInfos[0].options.collation,
+ clonedCollectionInfos[0].options.collation);
+ assert.eq([{_id: "FOO"}], clonedColl.find({_id: "foo"}).toArray());
+ }
}
// Test that the find command's min/max options respect the collation.
diff --git a/jstests/core/collation_convert_to_capped.js b/jstests/core/collation_convert_to_capped.js
index 7e77fd7b7fe..e1f79bafb76 100644
--- a/jstests/core/collation_convert_to_capped.js
+++ b/jstests/core/collation_convert_to_capped.js
@@ -4,9 +4,7 @@
*
* @tags: [
* requires_non_retryable_commands,
- *
- * # capped collections is not available on embedded
- * incompatible_with_embedded,
+ * requires_capped,
* ]
*/
diff --git a/jstests/core/convert_to_capped.js b/jstests/core/convert_to_capped.js
index 20476376cf4..e9a05f09450 100644
--- a/jstests/core/convert_to_capped.js
+++ b/jstests/core/convert_to_capped.js
@@ -3,9 +3,7 @@
*
* @tags: [
* requires_non_retryable_commands,
- *
- * # capped collections is not available on embedded
- * incompatible_with_embedded,
+ * requires_capped,
* ]
*/
diff --git a/jstests/core/create_collection_fail_cleanup.js b/jstests/core/create_collection_fail_cleanup.js
index 1417a54496c..6a0459f8c12 100644
--- a/jstests/core/create_collection_fail_cleanup.js
+++ b/jstests/core/create_collection_fail_cleanup.js
@@ -1,4 +1,5 @@
// SERVER-4560 test
+// @tags: [requires_capped]
var dbTest = db.getSisterDB("DB_create_collection_fail_cleanup");
dbTest.dropDatabase();
diff --git a/jstests/core/evalb.js b/jstests/core/evalb.js
index 92fbb81f31c..c943ab5c996 100644
--- a/jstests/core/evalb.js
+++ b/jstests/core/evalb.js
@@ -5,7 +5,8 @@
// does_not_support_stepdowns,
// requires_eval_command,
// requires_non_retryable_commands,
-// assumes_read_preference_unchanged
+// assumes_read_preference_unchanged,
+// requires_profiling
// ]
// Use a reserved database name to avoid a conflict in the parallel test suite.
diff --git a/jstests/core/geo_s2cursorlimitskip.js b/jstests/core/geo_s2cursorlimitskip.js
index ab907919108..87b3cf5b1b2 100644
--- a/jstests/core/geo_s2cursorlimitskip.js
+++ b/jstests/core/geo_s2cursorlimitskip.js
@@ -7,6 +7,8 @@
// assumes_read_preference_unchanged,
// does_not_support_stepdowns,
// requires_getmore,
+// requires_capped,
+// requires_profiling,
// ]
var testDB = db.getSiblingDB("geo_s2cursorlimitskip");
diff --git a/jstests/core/getmore_cmd_maxtimems.js b/jstests/core/getmore_cmd_maxtimems.js
index 755885b800d..7b13f858bc1 100644
--- a/jstests/core/getmore_cmd_maxtimems.js
+++ b/jstests/core/getmore_cmd_maxtimems.js
@@ -1,6 +1,6 @@
// Cannot implicitly shard accessed collections because of collection existing when none
// expected.
-// @tags: [assumes_no_implicit_collection_creation_after_drop, requires_getmore]
+// @tags: [assumes_no_implicit_collection_creation_after_drop, requires_getmore, requires_capped]
// Test attaching maxTimeMS to a getMore command.
(function() {
diff --git a/jstests/core/profile1.js b/jstests/core/profile1.js
index 4109c3ac55d..e3caa845147 100644
--- a/jstests/core/profile1.js
+++ b/jstests/core/profile1.js
@@ -1,4 +1,5 @@
-// @tags: [does_not_support_stepdowns, requires_non_retryable_commands, requires_collstats]
+// @tags: [does_not_support_stepdowns, requires_non_retryable_commands,
+// requires_collstats, requires_capped, requires_profiling]
(function() {
"use strict";
diff --git a/jstests/core/profile2.js b/jstests/core/profile2.js
index 7c206638a20..788f20f79a1 100644
--- a/jstests/core/profile2.js
+++ b/jstests/core/profile2.js
@@ -1,10 +1,7 @@
// Tests that large queries and updates are properly profiled.
// Special db so that it can be run in parallel tests.
-// @tags: [
-// # profile command is not available on embedded
-// incompatible_with_embedded,
-// ]
+// @tags: [requires_profiling]
var coll = db.getSisterDB("profile2").profile2;
diff --git a/jstests/core/profile3.js b/jstests/core/profile3.js
index 35e37d0917d..fac85a0cf73 100644
--- a/jstests/core/profile3.js
+++ b/jstests/core/profile3.js
@@ -1,7 +1,4 @@
-// @tags: [
-// # profile command is not available on embedded
-// incompatible_with_embedded,
-// ]
+// @tags: [requires_profiling]
// special db so that it can be run in parallel tests
var stddb = db;
var db = db.getSisterDB("profile3");
diff --git a/jstests/core/profile_agg.js b/jstests/core/profile_agg.js
index 9495250030a..d9502155ac9 100644
--- a/jstests/core/profile_agg.js
+++ b/jstests/core/profile_agg.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns]
+// @tags: [does_not_support_stepdowns, requires_profiling]
// Confirms that profiled aggregation execution contains all expected metrics with proper values.
diff --git a/jstests/core/profile_count.js b/jstests/core/profile_count.js
index be359f91ae6..9493278cd92 100644
--- a/jstests/core/profile_count.js
+++ b/jstests/core/profile_count.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns, requires_fastcount]
+// @tags: [does_not_support_stepdowns, requires_fastcount, requires_profiling]
// Confirms that profiled count execution contains all expected metrics with proper values.
diff --git a/jstests/core/profile_delete.js b/jstests/core/profile_delete.js
index e3a69e32d13..b9e4530a8c4 100644
--- a/jstests/core/profile_delete.js
+++ b/jstests/core/profile_delete.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns, requires_non_retryable_writes]
+// @tags: [does_not_support_stepdowns, requires_non_retryable_writes, requires_profiling]
// Confirms that profiled delete execution contains all expected metrics with proper values.
diff --git a/jstests/core/profile_distinct.js b/jstests/core/profile_distinct.js
index 2d825df811f..72d010636d6 100644
--- a/jstests/core/profile_distinct.js
+++ b/jstests/core/profile_distinct.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns]
+// @tags: [does_not_support_stepdowns, requires_profiling]
// Confirms that profiled distinct execution contains all expected metrics with proper values.
diff --git a/jstests/core/profile_find.js b/jstests/core/profile_find.js
index 60a430161ee..86ef0d66830 100644
--- a/jstests/core/profile_find.js
+++ b/jstests/core/profile_find.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns]
+// @tags: [does_not_support_stepdowns, requires_profiling]
// Confirms that profiled find execution contains all expected metrics with proper values.
diff --git a/jstests/core/profile_findandmodify.js b/jstests/core/profile_findandmodify.js
index 48ec80511eb..56e673ae639 100644
--- a/jstests/core/profile_findandmodify.js
+++ b/jstests/core/profile_findandmodify.js
@@ -1,8 +1,5 @@
// Confirms that profiled findAndModify execution contains all expected metrics with proper values.
-// @tags: [
-// # profile command is not available on embedded
-// incompatible_with_embedded,
-// ]
+// @tags: [requires_profiling]
(function() {
"use strict";
diff --git a/jstests/core/profile_geonear.js b/jstests/core/profile_geonear.js
index d8cb90a9d0c..c17cfc84bb6 100644
--- a/jstests/core/profile_geonear.js
+++ b/jstests/core/profile_geonear.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns]
+// @tags: [does_not_support_stepdowns, requires_profiling]
// Confirms that profiled geonear execution contains all expected metrics with proper values.
diff --git a/jstests/core/profile_getmore.js b/jstests/core/profile_getmore.js
index 6d16c291b91..344800dc011 100644
--- a/jstests/core/profile_getmore.js
+++ b/jstests/core/profile_getmore.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns, requires_getmore]
+// @tags: [does_not_support_stepdowns, requires_getmore, requires_profiling]
// Confirms that profiled getMore execution contains all expected metrics with proper values.
diff --git a/jstests/core/profile_insert.js b/jstests/core/profile_insert.js
index 6c32162701c..5f1bff8e2ea 100644
--- a/jstests/core/profile_insert.js
+++ b/jstests/core/profile_insert.js
@@ -3,6 +3,7 @@
// @tags: [
// assumes_write_concern_unchanged,
// does_not_support_stepdowns,
+// requires_profiling,
// ]
(function() {
diff --git a/jstests/core/profile_list_collections.js b/jstests/core/profile_list_collections.js
index 8ddd7b9f246..3db9e7971c9 100644
--- a/jstests/core/profile_list_collections.js
+++ b/jstests/core/profile_list_collections.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns, requires_getmore]
+// @tags: [does_not_support_stepdowns, requires_getmore, requires_profiling]
// Confirms that a listCollections command is not profiled.
diff --git a/jstests/core/profile_list_indexes.js b/jstests/core/profile_list_indexes.js
index 18062c71eac..2876a58ae90 100644
--- a/jstests/core/profile_list_indexes.js
+++ b/jstests/core/profile_list_indexes.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns, requires_getmore]
+// @tags: [does_not_support_stepdowns, requires_getmore, requires_profiling]
// Confirms that a listIndexes command and subsequent getMores of its cursor are profiled correctly.
diff --git a/jstests/core/profile_mapreduce.js b/jstests/core/profile_mapreduce.js
index 1e7b039b7ca..7d111779344 100644
--- a/jstests/core/profile_mapreduce.js
+++ b/jstests/core/profile_mapreduce.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns]
+// @tags: [does_not_support_stepdowns, requires_profiling]
// Confirms that profiled findAndModify execution contains all expected metrics with proper values.
diff --git a/jstests/core/profile_no_such_db.js b/jstests/core/profile_no_such_db.js
index cce8454fccb..5567f59f715 100644
--- a/jstests/core/profile_no_such_db.js
+++ b/jstests/core/profile_no_such_db.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns]
+// @tags: [does_not_support_stepdowns, requires_profiling]
// Test that reading the profiling level doesn't create databases, but setting it does.
(function(db) {
diff --git a/jstests/core/profile_parallel_collection_scan.js b/jstests/core/profile_parallel_collection_scan.js
index 602b13716c4..23363e7fbda 100644
--- a/jstests/core/profile_parallel_collection_scan.js
+++ b/jstests/core/profile_parallel_collection_scan.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns, requires_getmore]
+// @tags: [does_not_support_stepdowns, requires_getmore, requires_profiling]
// Confirms that a parallelCollectionScan command and subsequent getMores of its cursor are profiled
// correctly.
diff --git a/jstests/core/profile_repair_cursor.js b/jstests/core/profile_repair_cursor.js
index ad70bed7de0..f22399c58ff 100644
--- a/jstests/core/profile_repair_cursor.js
+++ b/jstests/core/profile_repair_cursor.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns]
+// @tags: [does_not_support_stepdowns, requires_profiling]
// Confirms that a repairCursor command and subsequent getMores of its cursor are profiled
// correctly.
diff --git a/jstests/core/profile_sampling.js b/jstests/core/profile_sampling.js
index 533fd848214..9b37e274055 100644
--- a/jstests/core/profile_sampling.js
+++ b/jstests/core/profile_sampling.js
@@ -1,5 +1,5 @@
// Confirms that the number of profiled operations is consistent with the sampleRate, if set.
-// @tags: [does_not_support_stepdowns, requires_fastcount]
+// @tags: [does_not_support_stepdowns, requires_fastcount, requires_profiling]
(function() {
"use strict";
diff --git a/jstests/core/profile_update.js b/jstests/core/profile_update.js
index 1e272ba6669..bf2cfe02714 100644
--- a/jstests/core/profile_update.js
+++ b/jstests/core/profile_update.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns, requires_non_retryable_writes]
+// @tags: [does_not_support_stepdowns, requires_non_retryable_writes, requires_profiling]
// Confirms that profiled update execution contains all expected metrics with proper values.
diff --git a/jstests/core/queryoptimizera.js b/jstests/core/queryoptimizera.js
index f96f80c45e6..f6950c81113 100644
--- a/jstests/core/queryoptimizera.js
+++ b/jstests/core/queryoptimizera.js
@@ -1,4 +1,4 @@
-// @tags: [does_not_support_stepdowns]
+// @tags: [does_not_support_stepdowns, requires_capped]
// Check that a warning message about doing a capped collection scan for a query with an _id
// constraint is printed at appropriate times. SERVER-5353
diff --git a/jstests/core/rename.js b/jstests/core/rename.js
index 6e40f8a1950..415fe665a53 100644
--- a/jstests/core/rename.js
+++ b/jstests/core/rename.js
@@ -1,4 +1,4 @@
-// @tags: [requires_non_retryable_commands, requires_fastcount]
+// @tags: [requires_non_retryable_commands, requires_fastcount, requires_capped]
admin = db.getMongo().getDB("admin");
diff --git a/jstests/core/rename7.js b/jstests/core/rename7.js
index 10a9250f3be..b50684a1002 100644
--- a/jstests/core/rename7.js
+++ b/jstests/core/rename7.js
@@ -1,4 +1,5 @@
-// @tags: [requires_non_retryable_commands, requires_fastcount, requires_collstats]
+// @tags: [requires_non_retryable_commands, requires_fastcount,
+// requires_collstats, requires_capped]
// ***************************************************************
// rename7.js
diff --git a/jstests/core/startup_log.js b/jstests/core/startup_log.js
index b8021675d44..1c66e463f43 100644
--- a/jstests/core/startup_log.js
+++ b/jstests/core/startup_log.js
@@ -6,10 +6,7 @@
* @tags: [
* assumes_read_preference_unchanged,
* requires_collstats,
- *
- * # mongoe modifies commandline internally so the check for unaltered getCmdLineOpts fails,
- * # figure out if this can be fixed
- * incompatible_with_embedded_todo_investigate,
+ * requires_capped,
* ]
*/
diff --git a/jstests/core/tailable_cursor_invalidation.js b/jstests/core/tailable_cursor_invalidation.js
index e9e29e8a24a..856dfc9c5c4 100644
--- a/jstests/core/tailable_cursor_invalidation.js
+++ b/jstests/core/tailable_cursor_invalidation.js
@@ -1,4 +1,4 @@
-// @tags: [requires_getmore]
+// @tags: [requires_getmore, requires_capped]
// Tests for the behavior of tailable cursors when a collection is dropped or the cursor is
// otherwise invalidated.
diff --git a/jstests/core/tailable_getmore_batch_size.js b/jstests/core/tailable_getmore_batch_size.js
index 212eae5576d..9e96f6f68a3 100644
--- a/jstests/core/tailable_getmore_batch_size.js
+++ b/jstests/core/tailable_getmore_batch_size.js
@@ -1,4 +1,4 @@
-// @tags: [requires_getmore]
+// @tags: [requires_getmore, requires_capped]
// Tests for the behavior of combining the tailable and awaitData options to the getMore command
// with the batchSize option.
diff --git a/jstests/core/tailable_skip_limit.js b/jstests/core/tailable_skip_limit.js
index 8aa39e54e8a..8669e29a836 100644
--- a/jstests/core/tailable_skip_limit.js
+++ b/jstests/core/tailable_skip_limit.js
@@ -1,4 +1,4 @@
-// @tags: [requires_getmore]
+// @tags: [requires_getmore, requires_capped]
// Test that tailable cursors work correctly with skip and limit.
(function() {
diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js
index 4e680f703b3..2fbde0eb4f8 100644
--- a/jstests/libs/parallelTester.js
+++ b/jstests/libs/parallelTester.js
@@ -247,7 +247,7 @@ if (typeof _threadInject != "undefined") {
// Most profiler tests can be run in parallel with each other as they use test-specific
// databases, with the exception of tests which modify slowms or the profiler's sampling
// rate, since those affect profile settings globally.
- parallelFilesDir + "/apitest_db.js",
+ parallelFilesDir + "/apitest_db_profile_level.js",
parallelFilesDir + "/evalb.js",
parallelFilesDir + "/geo_s2cursorlimitskip.js",
parallelFilesDir + "/profile1.js",
diff --git a/jstests/noPassthrough/aggregation_cursor_invalidations.js b/jstests/noPassthrough/aggregation_cursor_invalidations.js
index e35598dc2dd..8cfac15b77b 100644
--- a/jstests/noPassthrough/aggregation_cursor_invalidations.js
+++ b/jstests/noPassthrough/aggregation_cursor_invalidations.js
@@ -7,7 +7,7 @@
* to request more documents from the collection. If the pipeline is wrapped in a $facet stage, all
* results will be computed in the initial request and buffered in the results array, preventing the
* pipeline from requesting more documents.
- * @tags: [do_not_wrap_aggregations_in_facets]
+ * @tags: [do_not_wrap_aggregations_in_facets, requires_capped]
*/
(function() {
'use strict';
diff --git a/jstests/noPassthrough/currentop_includes_await_time.js b/jstests/noPassthrough/currentop_includes_await_time.js
index 0c34999a512..154eec5d9ae 100644
--- a/jstests/noPassthrough/currentop_includes_await_time.js
+++ b/jstests/noPassthrough/currentop_includes_await_time.js
@@ -1,6 +1,7 @@
/**
* Test that the operation latencies reported in current op for a getMore on an awaitData cursor
* include time spent blocking for the await time.
+ * @tags: [requires_capped]
*/
(function() {
"use test";
diff --git a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
index ad0c6856a1c..e3f10dade92 100644
--- a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
+++ b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
@@ -1,6 +1,7 @@
/**
* Test that latency reported in the profiler and logs include lock acquisition time for various
* CRUD operations.
+ * @tags: [requires_profiling]
*/
(function() {
"use strict";
diff --git a/jstests/noPassthrough/log_find_getmore.js b/jstests/noPassthrough/log_find_getmore.js
index a2b3d911f3f..94447948632 100644
--- a/jstests/noPassthrough/log_find_getmore.js
+++ b/jstests/noPassthrough/log_find_getmore.js
@@ -1,6 +1,7 @@
/**
* Confirms that the log output for command and legacy find and getMore are in the expected format.
* Legacy operations should be upconverted to match the format of their command counterparts.
+ * @tags: [requires_profiling]
*/
(function() {
"use strict";
diff --git a/jstests/noPassthrough/profile_agg_multiple_batches.js b/jstests/noPassthrough/profile_agg_multiple_batches.js
index d955efa3a4e..00fb738aca2 100644
--- a/jstests/noPassthrough/profile_agg_multiple_batches.js
+++ b/jstests/noPassthrough/profile_agg_multiple_batches.js
@@ -1,5 +1,6 @@
// Tests that keysExamined and docsExamined are correct for aggregation when multiple batches pass
// through DocumentSourceCursor.
+// @tags: [requires_profiling]
(function() {
"use strict";
diff --git a/jstests/noPassthrough/shell_appname_uri.js b/jstests/noPassthrough/shell_appname_uri.js
index 213e369eb36..e7c43164c11 100644
--- a/jstests/noPassthrough/shell_appname_uri.js
+++ b/jstests/noPassthrough/shell_appname_uri.js
@@ -1,3 +1,4 @@
+// @tags: [requires_profiling]
(function() {
"use strict";
@@ -73,4 +74,4 @@
});
MongoRunner.stopMongod(conn);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
index 7c37d73ebdd..e1aa184efa2 100644
--- a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
+++ b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
@@ -1,7 +1,7 @@
// Tests that specifying a maxTimeMS on a getMore request to mongos is not interpreted as a deadline
// for the operationfor a tailable + awaitData cursor.
// This test was designed to reproduce SERVER-33942 against a mongos.
-// @tags: [requires_sharding]
+// @tags: [requires_sharding, requires_capped]
(function() {
"use strict";
diff --git a/jstests/noPassthrough/ttl_capped.js b/jstests/noPassthrough/ttl_capped.js
index 8a3de697479..d3d383cc984 100644
--- a/jstests/noPassthrough/ttl_capped.js
+++ b/jstests/noPassthrough/ttl_capped.js
@@ -1,6 +1,7 @@
/**
* Test that a TTL index on a capped collection doesn't crash the server or cause the TTL monitor
* to skip processing other (non-capped) collections on the database.
+ * @tags: [requires_capped]
*/
(function() {
"use strict";
diff --git a/jstests/noPassthrough/write_local.js b/jstests/noPassthrough/write_local.js
index d88dc9350e7..6eff980d4a9 100644
--- a/jstests/noPassthrough/write_local.js
+++ b/jstests/noPassthrough/write_local.js
@@ -1,5 +1,5 @@
// SERVER-22011: Deadlock in ticket distribution
-// @tags: [requires_replication]
+// @tags: [requires_replication, requires_capped]
(function() {
'use strict';
diff --git a/jstests/noPassthrough/yield_during_writes.js b/jstests/noPassthrough/yield_during_writes.js
index 035b6469ee6..4d05c725659 100644
--- a/jstests/noPassthrough/yield_during_writes.js
+++ b/jstests/noPassthrough/yield_during_writes.js
@@ -1,4 +1,5 @@
// Ensure that multi-update and multi-remove operations yield regularly.
+// @tags: [requires_profiling]
(function() {
'use strict';
diff --git a/jstests/noPassthroughWithMongod/capped4.js b/jstests/noPassthroughWithMongod/capped4.js
index 039f2557866..bfa92cef26d 100644
--- a/jstests/noPassthroughWithMongod/capped4.js
+++ b/jstests/noPassthroughWithMongod/capped4.js
@@ -1,3 +1,4 @@
+// @tags: [requires_capped]
t = db.jstests_capped4;
t.drop();
diff --git a/jstests/noPassthroughWithMongod/capped_truncate.js b/jstests/noPassthroughWithMongod/capped_truncate.js
index c35318a6649..1f4cf236c57 100644
--- a/jstests/noPassthroughWithMongod/capped_truncate.js
+++ b/jstests/noPassthroughWithMongod/capped_truncate.js
@@ -5,7 +5,7 @@
* - non-capped collections
*
* This test fails with the ephemeralForTest storage engine.
- * @tags: [SERVER-21658]
+ * @tags: [SERVER-21658, requires_capped]
*/
(function() {
'use strict';
diff --git a/jstests/noPassthroughWithMongod/clonecollection.js b/jstests/noPassthroughWithMongod/clonecollection.js
index 7f2e1fbf771..8ae9203a93a 100644
--- a/jstests/noPassthroughWithMongod/clonecollection.js
+++ b/jstests/noPassthroughWithMongod/clonecollection.js
@@ -1,3 +1,4 @@
+// @tags: [requires_capped, requires_profiling]
// Test cloneCollection command
var baseName = "jstests_clonecollection";
@@ -65,4 +66,4 @@ assert.commandFailedWithCode(t.cloneCollection("localhost:" + fromMongod.port, "
ErrorCodes.CommandNotSupportedOnView,
"cloneCollection on view expected to fail");
MongoRunner.stopMongod(fromMongod);
-MongoRunner.stopMongod(toMongod);
\ No newline at end of file
+MongoRunner.stopMongod(toMongod);
diff --git a/jstests/noPassthroughWithMongod/explain2.js b/jstests/noPassthroughWithMongod/explain2.js
index 81b8951488f..3766c3a0e93 100644
--- a/jstests/noPassthroughWithMongod/explain2.js
+++ b/jstests/noPassthroughWithMongod/explain2.js
@@ -1,4 +1,5 @@
// Test for race condition SERVER-2807. One cursor is dropped and another is not.
+// @tags: [requires_capped]
collName = 'jstests_slowNightly_explain2';
@@ -15,4 +16,4 @@ for (i = 0; i < 800; ++i) {
t.find({x: {$gt: -1}, y: 1}).sort({x: -1}).explain();
}
-a();
\ No newline at end of file
+a();
diff --git a/jstests/noPassthroughWithMongod/find_cmd.js b/jstests/noPassthroughWithMongod/find_cmd.js
index 6ca536972c6..a1eac6b24f5 100644
--- a/jstests/noPassthroughWithMongod/find_cmd.js
+++ b/jstests/noPassthroughWithMongod/find_cmd.js
@@ -21,13 +21,16 @@ assert.eq(0, res.cursor.id);
assert.eq([], res.cursor.firstBatch);
// Ensure find command keeps cursor open if tailing a capped collection.
-coll.drop();
-assert.commandWorked(coll.getDB().createCollection(collname, {capped: true, size: 2048}));
-assert.writeOK(coll.insert({_id: 1}));
-res = coll.runCommand("find", {tailable: true});
-assert.commandWorked(res);
-assert.neq(0, res.cursor.id);
-assert.eq([{_id: 1}], res.cursor.firstBatch);
+// Skip the test with mobile storage engine as it doesn't support capped collections.
+if (jsTest.options().storageEngine !== "mobile") {
+ coll.drop();
+ assert.commandWorked(coll.getDB().createCollection(collname, {capped: true, size: 2048}));
+ assert.writeOK(coll.insert({_id: 1}));
+ res = coll.runCommand("find", {tailable: true});
+ assert.commandWorked(res);
+ assert.neq(0, res.cursor.id);
+ assert.eq([{_id: 1}], res.cursor.firstBatch);
+}
// Multiple batches.
coll.drop();
diff --git a/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js b/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
index f61f7ec5a22..cef102b8e6d 100644
--- a/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
+++ b/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
@@ -1,5 +1,6 @@
/**
* Test that opcounters are correct for getMore operations on awaitData cursors.
+ * @tags: [requires_capped]
*/
(function() {
"use strict";
diff --git a/jstests/noPassthroughWithMongod/query_oplogreplay.js b/jstests/noPassthroughWithMongod/query_oplogreplay.js
index 36e286f917d..c264e211ef6 100644
--- a/jstests/noPassthroughWithMongod/query_oplogreplay.js
+++ b/jstests/noPassthroughWithMongod/query_oplogreplay.js
@@ -1,5 +1,5 @@
// Test queries that set the OplogReplay flag.
-// @tags: [requires_replication]
+// @tags: [requires_replication, requires_capped]
(function() {
"use strict";
diff --git a/jstests/noPassthroughWithMongod/rpc_protocols.js b/jstests/noPassthroughWithMongod/rpc_protocols.js
index 9b1a9bb67d1..4a5e436454b 100644
--- a/jstests/noPassthroughWithMongod/rpc_protocols.js
+++ b/jstests/noPassthroughWithMongod/rpc_protocols.js
@@ -3,6 +3,7 @@
// A user can configure the shell to send commands via OP_QUERY or OP_COMMAND. This can be done at
// startup using the "--rpcProtocols" command line option, or at runtime using the
// "setClientRPCProtocols" method on the Mongo object.
+// @tags: [requires_profiling]
var RPC_PROTOCOLS = {OP_QUERY: "opQueryOnly", OP_COMMAND: "opCommandOnly"};
diff --git a/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js b/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
index 0e8d05db09b..0aeb98d27e4 100644
--- a/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
+++ b/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
@@ -1,6 +1,7 @@
// Tests that specifying a maxTimeMS on a getMore for a tailable + awaitData cursor is not
// interpreted as a deadline for the operation.
// This test was designed to reproduce SERVER-33942 against a mongod.
+// @tags: [requires_capped]
(function() {
"use strict";
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index a2b42097e7c..f85d912b8fc 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -523,8 +523,14 @@ ExitCode _initAndListen(int listenPort) {
waitForShardRegistryReload(startupOpCtx.get()).transitional_ignore();
}
+ auto storageEngine = serviceContext->getStorageEngine();
+ invariant(storageEngine);
+
if (!storageGlobalParams.readOnly) {
- logStartup(startupOpCtx.get());
+
+ if (storageEngine->supportsCappedCollections()) {
+ logStartup(startupOpCtx.get());
+ }
startMongoDFTDC();
@@ -608,8 +614,6 @@ ExitCode _initAndListen(int listenPort) {
//
// Only do this on storage engines supporting snapshot reads, which hold resources we wish to
// release periodically in order to avoid storage cache pressure build up.
- auto storageEngine = serviceContext->getStorageEngine();
- invariant(storageEngine);
if (storageEngine->supportsReadConcernSnapshot()) {
startPeriodicThreadToAbortExpiredTransactions(serviceContext);
startPeriodicThreadToDecreaseSnapshotHistoryCachePressure(serviceContext);
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index 65a9a281396..d462f8158a5 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -208,6 +208,13 @@ public:
}
/**
+ * This must not change over the lifetime of the engine.
+ */
+ virtual bool supportsCappedCollections() const {
+ return true;
+ }
+
+ /**
* Returns true if storage engine supports --directoryperdb.
* See:
* http://docs.mongodb.org/manual/reference/program/mongod/#cmdoption--directoryperdb
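
For orientation, the new hook lets callers ask an engine up front whether capped collections can be created at all; db.cpp above uses it to skip logStartup(), which records server start information in the capped local.startup_log collection. Below is a minimal, self-contained sketch of that capability-flag pattern; Engine and MobileLikeEngine are illustrative stand-ins, not the actual mongo classes.

#include <iostream>

// Stand-in for the engine interface: capped-collection support defaults to
// true and is expected to stay constant for the engine's lifetime.
class Engine {
public:
    virtual ~Engine() = default;
    virtual bool supportsCappedCollections() const {
        return true;
    }
};

// Stand-in for an engine, like the mobile one, that opts out.
class MobileLikeEngine : public Engine {
public:
    bool supportsCappedCollections() const override {
        return false;
    }
};

int main() {
    MobileLikeEngine engine;
    // Callers gate capped-collection work on the flag, mirroring the
    // logStartup() guard added in db.cpp above.
    if (engine.supportsCappedCollections()) {
        std::cout << "create capped startup log" << std::endl;
    } else {
        std::cout << "skip capped-collection work" << std::endl;
    }
    return 0;
}
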
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index d954893a2a7..f7deeb3396f 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -83,7 +83,8 @@ KVStorageEngine::KVStorageEngine(
_options(options),
_engine(engine),
_supportsDocLocking(_engine->supportsDocLocking()),
- _supportsDBLocking(_engine->supportsDBLocking()) {
+ _supportsDBLocking(_engine->supportsDBLocking()),
+ _supportsCappedCollections(_engine->supportsCappedCollections()) {
uassert(28601,
"Storage engine does not support --directoryperdb",
!(options.directoryPerDB && !engine->supportsDirectoryPerDB()));
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index 6ffe83b5210..d839f149af6 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -95,6 +95,10 @@ public:
return _supportsDBLocking;
}
+ virtual bool supportsCappedCollections() const {
+ return _supportsCappedCollections;
+ }
+
virtual Status closeDatabase(OperationContext* opCtx, StringData db);
virtual Status dropDatabase(OperationContext* opCtx, StringData db);
@@ -200,6 +204,7 @@ private:
const bool _supportsDocLocking;
const bool _supportsDBLocking;
+ const bool _supportsCappedCollections;
Timestamp _initialDataTimestamp = Timestamp::kAllowUnstableCheckpointsSentinel;
std::unique_ptr<RecordStore> _catalogRecordStore;
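
The KVStorageEngine facade samples the underlying KVEngine once in its constructor and keeps the answer in a const member, which matches the header comment that the value must not change over the lifetime of the engine. A compact, self-contained sketch of that caching pattern follows; the class names are illustrative, not the real types.

#include <memory>

// Stand-in for the lower-level engine interface.
class KvEngineLike {
public:
    virtual ~KvEngineLike() = default;
    virtual bool supportsCappedCollections() const { return true; }
};

// Stand-in for the facade: the capability is read once at construction and
// stored in a const member, so it cannot change afterwards.
class StorageEngineLike {
public:
    explicit StorageEngineLike(std::unique_ptr<KvEngineLike> engine)
        : _engine(std::move(engine)),
          _supportsCappedCollections(_engine->supportsCappedCollections()) {}

    bool supportsCappedCollections() const {
        return _supportsCappedCollections;
    }

private:
    std::unique_ptr<KvEngineLike> _engine;
    const bool _supportsCappedCollections;
};

int main() {
    StorageEngineLike se(std::make_unique<KvEngineLike>());
    return se.supportsCappedCollections() ? 0 : 1;
}
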
diff --git a/src/mongo/db/storage/mobile/mobile_kv_engine.cpp b/src/mongo/db/storage/mobile/mobile_kv_engine.cpp
index d278bf02a14..4564760c758 100644
--- a/src/mongo/db/storage/mobile/mobile_kv_engine.cpp
+++ b/src/mongo/db/storage/mobile/mobile_kv_engine.cpp
@@ -126,6 +126,13 @@ Status MobileKVEngine::createRecordStore(OperationContext* opCtx,
StringData ident,
const CollectionOptions& options) {
// TODO: eventually will support file renaming but otherwise do not use collection options.
+
+ // Mobile doesn't support capped collections
+ if (options.capped) {
+ return Status(ErrorCodes::InvalidOptions,
+ "Capped collections are not supported by the mobile storage engine");
+ }
+
MobileRecordStore::create(opCtx, ident.toString());
return Status::OK();
}
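
With this guard in place, an attempt to create a capped collection on the mobile engine fails up front with InvalidOptions instead of producing a record store that cannot honor capped semantics. A small self-contained sketch of that early-return shape is below; the StatusLike and CollectionOptionsLike structs are simplified stand-ins for the real Status and CollectionOptions types.

#include <iostream>
#include <string>

// Simplified stand-ins for CollectionOptions and Status.
struct CollectionOptionsLike {
    bool capped = false;
};

struct StatusLike {
    bool ok;
    std::string reason;
};

// Mirrors the mobile-engine guard: refuse capped collections before any
// record store is created.
StatusLike createRecordStoreSketch(const CollectionOptionsLike& options) {
    if (options.capped) {
        return {false,
                "Capped collections are not supported by the mobile storage engine"};
    }
    return {true, ""};
}

int main() {
    CollectionOptionsLike opts;
    opts.capped = true;
    const StatusLike status = createRecordStoreSketch(opts);
    std::cout << (status.ok ? "OK" : status.reason) << std::endl;
    return 0;
}
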
diff --git a/src/mongo/db/storage/mobile/mobile_kv_engine.h b/src/mongo/db/storage/mobile/mobile_kv_engine.h
index a87da11ad31..8618cd4510a 100644
--- a/src/mongo/db/storage/mobile/mobile_kv_engine.h
+++ b/src/mongo/db/storage/mobile/mobile_kv_engine.h
@@ -80,6 +80,10 @@ public:
return false;
}
+ bool supportsCappedCollections() const override {
+ return false;
+ }
+
bool supportsDirectoryPerDB() const override {
return false;
}
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 99cc8ebd76f..41c937019cd 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -191,6 +191,13 @@ public:
}
/**
+ * Returns whether the storage engine supports capped collections.
+ */
+ virtual bool supportsCappedCollections() const {
+ return true;
+ }
+
+ /**
* Returns whether the engine supports a journalling concept or not.
*/
virtual bool isDurable() const = 0;
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 31744d6767d..13fc07014bd 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -209,7 +209,7 @@ public:
OperationContext& opCtx = *opCtxPtr;
DBDirectClient db(&opCtx);
- db.createCollection("unittests.clienttests.create", 4096, true);
+ db.createCollection("unittests.clienttests.create");
BSONObj info;
ASSERT(db.runCommand("unittests",
BSON("collstats"
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index dcf60479927..2d278ac3126 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -62,6 +62,11 @@ const char* ns = "a.b";
class Capped : public ClientBase {
public:
virtual void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
DBDirectClient client(&opCtx);
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index 5a834ed8aa5..33530e2c2a5 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -351,6 +351,11 @@ TEST_F(DocumentSourceCursorTest, ExpressionContextAndSerializeVerbosityMismatch)
}
TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterTimeout) {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
// Make sure the collection exists, otherwise we'll default to a NO_YIELD yield policy.
const bool capped = true;
const long long cappedSize = 1024;
@@ -431,6 +436,11 @@ TEST_F(DocumentSourceCursorTest, NonAwaitDataCursorShouldErrorAfterTimeout) {
}
TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterBeingKilled) {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
// Make sure the collection exists, otherwise we'll default to a NO_YIELD yield policy.
const bool capped = true;
const long long cappedSize = 1024;
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 21320b06d4d..650370c7bb1 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -348,6 +348,11 @@ public:
class InsertBuildIdIndexInterrupt : public IndexBuildBase {
public:
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
// Recreate the collection as capped, without an _id index.
Database* db = _ctx.db();
Collection* coll;
@@ -387,6 +392,11 @@ public:
class InsertBuildIdIndexInterruptDisallowed : public IndexBuildBase {
public:
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
// Recreate the collection as capped, without an _id index.
Database* db = _ctx.db();
Collection* coll;
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 3dd7e253f55..f182ae5b5ec 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -420,6 +420,11 @@ public:
_client.dropCollection("unittests.querytests.TailNotAtEnd");
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
const char* ns = "unittests.querytests.TailNotAtEnd";
_client.createCollection(ns, 2047, true);
insert(ns, BSON("a" << 0));
@@ -446,6 +451,11 @@ public:
_client.dropCollection("unittests.querytests.EmptyTail");
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
const char* ns = "unittests.querytests.EmptyTail";
_client.createCollection(ns, 1900, true);
unique_ptr<DBClientCursor> c = _client.query(
@@ -466,6 +476,11 @@ public:
_client.dropCollection("unittests.querytests.TailableDelete");
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
const char* ns = "unittests.querytests.TailableDelete";
_client.createCollection(ns, 8192, true, 2);
insert(ns, BSON("a" << 0));
@@ -489,6 +504,11 @@ public:
_client.dropCollection("unittests.querytests.TailableDelete");
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
const char* ns = "unittests.querytests.TailableDelete";
_client.createCollection(ns, 8192, true, 2);
insert(ns, BSON("a" << 0));
@@ -514,6 +534,11 @@ public:
_client.dropCollection("unittests.querytests.TailableInsertDelete");
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
const char* ns = "unittests.querytests.TailableInsertDelete";
_client.createCollection(ns, 1330, true);
insert(ns, BSON("a" << 0));
@@ -559,6 +584,11 @@ public:
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
const char* ns = "unittests.querytests.TailableQueryOnId";
BSONObj info;
_client.runCommand("unittests",
@@ -602,6 +632,11 @@ public:
_client.dropCollection("unittests.querytests.OplogReplayMode");
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
const char* ns = "unittests.querytests.OplogReplayMode";
// Create a capped collection of size 10.
@@ -641,6 +676,11 @@ public:
_client.dropCollection("unittests.querytests.OplogReplayExplain");
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
const char* ns = "unittests.querytests.OplogReplayExplain";
// Create a capped collection of size 10.
@@ -1239,9 +1279,8 @@ public:
_n = 0;
}
void run() {
- // SERVER-32698 Skipping this test for mobile SE:
- // Capped collection isn't properly supported yet with mobile SE.
- if (mongo::storageGlobalParams.engine == "mobile") {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -1398,6 +1437,11 @@ public:
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
BSONObj info;
// Must use local db so that the collection is not replicated, to allow autoIndexId:false.
ASSERT(_client.runCommand("local",
@@ -1457,6 +1501,11 @@ public:
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
size_t startNumCursors = numCursorsOpen();
BSONObj info;
@@ -1514,6 +1563,11 @@ public:
}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
size_t startNumCursors = numCursorsOpen();
// Check OplogReplay mode with missing collection.
@@ -1578,6 +1632,11 @@ class Exhaust : public CollectionInternalBase {
public:
Exhaust() : CollectionInternalBase("exhaust") {}
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
BSONObj info;
ASSERT(_client.runCommand("unittests",
BSON("create"
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index bade34cb7ea..3e6c0131b39 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -156,6 +156,11 @@ template <bool rollback, bool defaultIndexes, bool capped>
class CreateCollection {
public:
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
string ns = "unittests.rollback_create_collection";
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
@@ -187,6 +192,11 @@ template <bool rollback, bool defaultIndexes, bool capped>
class DropCollection {
public:
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
string ns = "unittests.rollback_drop_collection";
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
@@ -228,6 +238,11 @@ template <bool rollback, bool defaultIndexes, bool capped>
class RenameCollection {
public:
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
NamespaceString source("unittests.rollback_rename_collection_src");
NamespaceString target("unittests.rollback_rename_collection_dest");
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
@@ -280,6 +295,11 @@ template <bool rollback, bool defaultIndexes, bool capped>
class RenameDropTargetCollection {
public:
void run() {
+ // Skip the test if the storage engine doesn't support capped collections.
+ if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ return;
+ }
+
NamespaceString source("unittests.rollback_rename_droptarget_collection_src");
NamespaceString target("unittests.rollback_rename_droptarget_collection_dest");
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();