Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/auth/authorization_session.h  |  8
-rw-r--r--  src/mongo/db/auth/authorization_session_impl.cpp  | 40
-rw-r--r--  src/mongo/db/auth/authorization_session_impl.h  |  4
-rw-r--r--  src/mongo/db/auth/authorization_session_test.cpp  | 37
-rw-r--r--  src/mongo/db/auth/role_graph_builtin_roles.cpp  | 40
-rw-r--r--  src/mongo/db/catalog/database.h  |  6
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp  |  1
-rw-r--r--  src/mongo/db/catalog/database_impl.h  |  9
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp  |  2
-rw-r--r--  src/mongo/db/catalog/drop_database_test.cpp  | 10
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp  | 11
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.h  |  6
-rw-r--r--  src/mongo/db/cloner.cpp  |  2
-rw-r--r--  src/mongo/db/commands/clone_collection.cpp  |  5
-rw-r--r--  src/mongo/db/commands/compact.cpp  |  3
-rw-r--r--  src/mongo/db/commands/list_collections.cpp  |  7
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp  |  8
-rw-r--r--  src/mongo/db/commands/oplog_application_checks.cpp  |  2
-rw-r--r--  src/mongo/db/commands/rename_collection_cmd.cpp  |  5
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands_common.cpp  |  7
-rw-r--r--  src/mongo/db/fts/fts_spec.cpp  |  7
-rw-r--r--  src/mongo/db/index_builder.cpp  |  2
-rw-r--r--  src/mongo/db/index_builder.h  |  3
-rw-r--r--  src/mongo/db/index_rebuilder.cpp  | 13
-rw-r--r--  src/mongo/db/namespace_string.h  |  9
-rw-r--r--  src/mongo/db/op_observer_impl.cpp  | 58
-rw-r--r--  src/mongo/db/ops/insert.cpp  |  4
-rw-r--r--  src/mongo/db/ops/write_ops.h  |  6
-rw-r--r--  src/mongo/db/ops/write_ops_document_stream_integration_test.cpp  | 35
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp  | 30
-rw-r--r--  src/mongo/db/ops/write_ops_parsers.cpp  | 28
-rw-r--r--  src/mongo/db/pipeline/document_source_change_stream_test.cpp  | 35
-rw-r--r--  src/mongo/db/repair_database_and_check_version.cpp  | 50
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp  | 70
-rw-r--r--  src/mongo/db/repl/do_txn.cpp  | 12
-rw-r--r--  src/mongo/db/repl/oplog.cpp  | 47
-rw-r--r--  src/mongo/db/repl/oplog.h  |  9
-rw-r--r--  src/mongo/db/repl/replication_coordinator.cpp  |  5
-rw-r--r--  src/mongo/db/repl/storage_interface_impl_test.cpp  |  2
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp  |  4
-rw-r--r--  src/mongo/db/s/operation_sharding_state.cpp  |  8
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp  |  2
42 files changed, 71 insertions(+), 581 deletions(-)
diff --git a/src/mongo/db/auth/authorization_session.h b/src/mongo/db/auth/authorization_session.h
index 5042a1fd7e5..6afd520910b 100644
--- a/src/mongo/db/auth/authorization_session.h
+++ b/src/mongo/db/auth/authorization_session.h
@@ -202,12 +202,8 @@ public:
const BSONObj& update,
bool upsert) = 0;
- // Checks if this connection has the privileges necessary to insert the given document
- // to the given namespace. Correctly interprets inserts to system.indexes and performs
- // the proper auth checks for index building.
- virtual Status checkAuthForInsert(OperationContext* opCtx,
- const NamespaceString& ns,
- const BSONObj& document) = 0;
+ // Checks if this connection has the privileges necessary to insert to the given namespace.
+ virtual Status checkAuthForInsert(OperationContext* opCtx, const NamespaceString& ns) = 0;
// Checks if this connection has the privileges necessary to perform a delete on the given
// namespace.
diff --git a/src/mongo/db/auth/authorization_session_impl.cpp b/src/mongo/db/auth/authorization_session_impl.cpp
index c1920d36d63..7ef744aef6e 100644
--- a/src/mongo/db/auth/authorization_session_impl.cpp
+++ b/src/mongo/db/auth/authorization_session_impl.cpp
@@ -351,30 +351,14 @@ Status AuthorizationSessionImpl::checkAuthForGetMore(const NamespaceString& ns,
}
Status AuthorizationSessionImpl::checkAuthForInsert(OperationContext* opCtx,
- const NamespaceString& ns,
- const BSONObj& document) {
- if (ns.coll() == "system.indexes"_sd) {
- BSONElement nsElement = document["ns"];
- if (nsElement.type() != String) {
- return Status(nsElement.type() == BSONType::EOO ? ErrorCodes::NoSuchKey
- : ErrorCodes::TypeMismatch,
- "Cannot authorize inserting into "
- "system.indexes documents without a string-typed \"ns\" field.");
- }
- NamespaceString indexNS(nsElement.valueStringData());
- if (!isAuthorizedForActionsOnNamespace(indexNS, ActionType::createIndex)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized to create index on " << indexNS.ns());
- }
- } else {
- ActionSet required{ActionType::insert};
- if (documentValidationDisabled(opCtx)) {
- required.addAction(ActionType::bypassDocumentValidation);
- }
- if (!isAuthorizedForActionsOnNamespace(ns, required)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for insert on " << ns.ns());
- }
+ const NamespaceString& ns) {
+ ActionSet required{ActionType::insert};
+ if (documentValidationDisabled(opCtx)) {
+ required.addAction(ActionType::bypassDocumentValidation);
+ }
+ if (!isAuthorizedForActionsOnNamespace(ns, required)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized for insert on " << ns.ns());
}
return Status::OK();
@@ -747,13 +731,9 @@ bool AuthorizationSessionImpl::isAuthorizedToListCollections(StringData dbname,
return true;
}
- // Check for the listCollections ActionType on the database or find on system.namespaces for
- // pre 3.0 systems.
+ // Check for the listCollections ActionType on the database.
return AuthorizationSessionImpl::isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::listCollections) ||
- AuthorizationSessionImpl::isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(dbname, "system.namespaces")),
- ActionType::find);
+ ResourcePattern::forDatabaseName(dbname), ActionType::listCollections);
}
bool AuthorizationSessionImpl::isAuthenticatedAsUserWithRole(const RoleName& roleName) {
diff --git a/src/mongo/db/auth/authorization_session_impl.h b/src/mongo/db/auth/authorization_session_impl.h
index 5449d2a99b8..b0b6bb731d3 100644
--- a/src/mongo/db/auth/authorization_session_impl.h
+++ b/src/mongo/db/auth/authorization_session_impl.h
@@ -113,9 +113,7 @@ public:
const BSONObj& update,
bool upsert) override;
- Status checkAuthForInsert(OperationContext* opCtx,
- const NamespaceString& ns,
- const BSONObj& document) override;
+ Status checkAuthForInsert(OperationContext* opCtx, const NamespaceString& ns) override;
Status checkAuthForDelete(OperationContext* opCtx,
const NamespaceString& ns,
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index e629f50cac5..cad01f09e9b 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -149,20 +149,12 @@ const ResourcePattern otherUsersCollResource(
ResourcePattern::forExactNamespace(NamespaceString("other.system.users")));
const ResourcePattern thirdUsersCollResource(
ResourcePattern::forExactNamespace(NamespaceString("third.system.users")));
-const ResourcePattern testIndexesCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("test.system.indexes")));
-const ResourcePattern otherIndexesCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("other.system.indexes")));
-const ResourcePattern thirdIndexesCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("third.system.indexes")));
const ResourcePattern testProfileCollResource(
ResourcePattern::forExactNamespace(NamespaceString("test.system.profile")));
const ResourcePattern otherProfileCollResource(
ResourcePattern::forExactNamespace(NamespaceString("other.system.profile")));
const ResourcePattern thirdProfileCollResource(
ResourcePattern::forExactNamespace(NamespaceString("third.system.profile")));
-const ResourcePattern testSystemNamespacesResource(
- ResourcePattern::forExactNamespace(NamespaceString("test.system.namespaces")));
TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
// Check that disabling auth checks works
@@ -360,12 +352,8 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::find));
ASSERT_TRUE(
- authzSession->isAuthorizedForActionsOnResource(testIndexesCollResource, ActionType::find));
- ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(testProfileCollResource, ActionType::find));
ASSERT_TRUE(
- authzSession->isAuthorizedForActionsOnResource(otherIndexesCollResource, ActionType::find));
- ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
// Logging in as useradminany@test implicitly logs out rwany@test.
@@ -379,12 +367,8 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::find));
ASSERT_FALSE(
- authzSession->isAuthorizedForActionsOnResource(testIndexesCollResource, ActionType::find));
- ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(testProfileCollResource, ActionType::find));
ASSERT_FALSE(
- authzSession->isAuthorizedForActionsOnResource(otherIndexesCollResource, ActionType::find));
- ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
// Logging in as rw@test implicitly logs out useradminany@test.
@@ -399,12 +383,8 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::find));
ASSERT_TRUE(
- authzSession->isAuthorizedForActionsOnResource(testIndexesCollResource, ActionType::find));
- ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(testProfileCollResource, ActionType::find));
ASSERT_FALSE(
- authzSession->isAuthorizedForActionsOnResource(otherIndexesCollResource, ActionType::find));
- ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
@@ -419,12 +399,8 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::find));
ASSERT_FALSE(
- authzSession->isAuthorizedForActionsOnResource(testIndexesCollResource, ActionType::find));
- ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(testProfileCollResource, ActionType::find));
ASSERT_FALSE(
- authzSession->isAuthorizedForActionsOnResource(otherIndexesCollResource, ActionType::find));
- ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
}
@@ -1252,19 +1228,6 @@ TEST_F(AuthorizationSessionTest, CannotListCollectionsWithoutListCollectionsPriv
ASSERT_FALSE(authzSession->isAuthorizedToListCollections(testQuxNss.db(), cmd));
}
-TEST_F(AuthorizationSessionTest, CanListCollectionsWithLegacySystemNamespacesAccess) {
- BSONObj cmd = BSON("listCollections" << 1);
-
- // Deprecated: permissions for the find action on test.system.namespaces allows us to list
- // collections in the test database.
- authzSession->assumePrivilegesForDB(
- Privilege(testSystemNamespacesResource, {ActionType::find}));
-
- ASSERT_TRUE(authzSession->isAuthorizedToListCollections(testFooNss.db(), cmd));
- ASSERT_TRUE(authzSession->isAuthorizedToListCollections(testBarNss.db(), cmd));
- ASSERT_TRUE(authzSession->isAuthorizedToListCollections(testQuxNss.db(), cmd));
-}
-
TEST_F(AuthorizationSessionTest, CanListCollectionsWithListCollectionsPrivilege) {
BSONObj cmd = BSON("listCollections" << 1);
// The listCollections privilege authorizes the list collections command.
diff --git a/src/mongo/db/auth/role_graph_builtin_roles.cpp b/src/mongo/db/auth/role_graph_builtin_roles.cpp
index 8f096ac641d..d9875a20f96 100644
--- a/src/mongo/db/auth/role_graph_builtin_roles.cpp
+++ b/src/mongo/db/auth/role_graph_builtin_roles.cpp
@@ -261,16 +261,8 @@ void addReadOnlyDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
privileges, Privilege(ResourcePattern::forDatabaseName(dbName), readRoleActions));
Privilege::addPrivilegeToPrivilegeVector(
privileges,
- Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.indexes")),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.js")),
readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.namespaces")),
- readRoleActions));
}
void addReadWriteDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
@@ -291,14 +283,6 @@ void addUserAdminDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
void addDbAdminDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
Privilege::addPrivilegeToPrivilegeVector(
privileges, Privilege(ResourcePattern::forDatabaseName(dbName), dbAdminRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.indexes")),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.namespaces")),
- readRoleActions));
ActionSet profileActions = readRoleActions;
profileActions.addAction(ActionType::convertToCapped);
@@ -329,13 +313,7 @@ void addReadOnlyAnyDbPrivileges(PrivilegeVector* privileges) {
Privilege::addPrivilegeToPrivilegeVector(
privileges, Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases));
Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.indexes"), readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
privileges, Privilege(ResourcePattern::forCollectionName("system.js"), readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.namespaces"), readRoleActions));
}
void addReadWriteAnyDbPrivileges(PrivilegeVector* privileges) {
@@ -402,12 +380,6 @@ void addDbAdminAnyDbPrivileges(PrivilegeVector* privileges) {
privileges, Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases));
Privilege::addPrivilegeToPrivilegeVector(
privileges, Privilege(ResourcePattern::forAnyNormalResource(), dbAdminRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.indexes"), readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.namespaces"), readRoleActions));
ActionSet profileActions = readRoleActions;
profileActions.addAction(ActionType::convertToCapped);
profileActions.addAction(ActionType::createCollection);
@@ -509,14 +481,6 @@ void addQueryableBackupPrivileges(PrivilegeVector* privileges) {
privileges, Privilege(ResourcePattern::forDatabaseName("local"), ActionType::find));
Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.indexes"), ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.namespaces"), ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
privileges, Privilege(ResourcePattern::forCollectionName("system.js"), ActionType::find));
Privilege::addPrivilegeToPrivilegeVector(
@@ -585,10 +549,6 @@ void addRestorePrivileges(PrivilegeVector* privileges) {
Privilege::addPrivilegeToPrivilegeVector(
privileges, Privilege(ResourcePattern::forCollectionName("system.js"), actions));
- // Need to be able to query system.namespaces to check existing collection options.
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.namespaces"), ActionType::find));
Privilege::addPrivilegeToPrivilegeVector(
privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::listCollections));
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index 73038f59a44..86c256d2925 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -115,8 +115,6 @@ public:
StringData toNS,
bool stayTemp) = 0;
- virtual const NamespaceString& getSystemIndexesName() const = 0;
-
virtual const std::string& getSystemViewsName() const = 0;
virtual StatusWith<NamespaceString> makeUniqueCollectionNamespace(
@@ -353,10 +351,6 @@ public:
*/
static MONGO_DECLARE_SHIM((OperationContext * opCtx, Database* db)->void) dropDatabase;
- inline const NamespaceString& getSystemIndexesName() const {
- return this->_impl().getSystemIndexesName();
- }
-
inline const std::string& getSystemViewsName() const {
return this->_impl().getSystemViewsName();
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 47033e7dcfa..3f68c72fe8c 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -248,7 +248,6 @@ DatabaseImpl::DatabaseImpl(Database* const this_,
: _name(name.toString()),
_dbEntry(dbEntry),
_profileName(_name + ".system.profile"),
- _indexesName(_name + ".system.indexes"),
_viewsName(_name + "." + DurableViewCatalog::viewsCollectionName().toString()),
_durableViews(DurableViewCatalogImpl(this_)),
_views(&_durableViews),
diff --git a/src/mongo/db/catalog/database_impl.h b/src/mongo/db/catalog/database_impl.h
index 8a341037778..77d8505d3d2 100644
--- a/src/mongo/db/catalog/database_impl.h
+++ b/src/mongo/db/catalog/database_impl.h
@@ -216,10 +216,6 @@ public:
static Status validateDBName(StringData dbname);
- const NamespaceString& getSystemIndexesName() const final {
- return _indexesName;
- }
-
const std::string& getSystemViewsName() const final {
return _viewsName;
}
@@ -279,9 +275,8 @@ private:
DatabaseCatalogEntry* _dbEntry; // not owned here
- const std::string _profileName; // "dbname.system.profile"
- const NamespaceString _indexesName; // "dbname.system.indexes"
- const std::string _viewsName; // "dbname.system.views"
+ const std::string _profileName; // "dbname.system.profile"
+ const std::string _viewsName; // "dbname.system.views"
int _profile; // 0=off.
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 58ae3624a62..fa80f77808a 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -145,7 +145,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
latestDropPendingOpTime, uassertStatusOK(nss.getDropPendingNamespaceOpTime()));
continue;
}
- if (replCoord->isOplogDisabledFor(opCtx, nss) || nss.isSystemDotIndexes()) {
+ if (replCoord->isOplogDisabledFor(opCtx, nss)) {
continue;
}
collectionsToDrop.push_back(nss);
diff --git a/src/mongo/db/catalog/drop_database_test.cpp b/src/mongo/db/catalog/drop_database_test.cpp
index db8de66f22c..9cf3c7eefa7 100644
--- a/src/mongo/db/catalog/drop_database_test.cpp
+++ b/src/mongo/db/catalog/drop_database_test.cpp
@@ -285,16 +285,6 @@ TEST_F(DropDatabaseTest, DropDatabasePassedThroughAwaitReplicationErrorForDropPe
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, dropDatabase(_opCtx.get(), _nss.db().toString()));
}
-TEST_F(DropDatabaseTest, DropDatabaseSkipsSystemDotIndexesCollectionWhenDroppingCollections) {
- NamespaceString systemDotIndexesNss(_nss.getSystemIndexesCollection());
- _testDropDatabase(_opCtx.get(), _opObserver, systemDotIndexesNss, false);
-}
-
-TEST_F(DropDatabaseTest, DropDatabaseSkipsSystemNamespacesCollectionWhenDroppingCollections) {
- NamespaceString systemNamespacesNss(_nss.getSisterNS("system.namespaces"));
- _testDropDatabase(_opCtx.get(), _opObserver, systemNamespacesNss, false);
-}
-
TEST_F(DropDatabaseTest, DropDatabaseSkipsSystemProfileCollectionWhenDroppingCollections) {
repl::OpTime dropOpTime(Timestamp(Seconds(100), 0), 1LL);
NamespaceString profileNss(_nss.getSisterNS("system.profile"));
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index af033c58f91..f6f9eea188c 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -503,10 +503,6 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
<< static_cast<int>(indexVersion));
}
- if (nss.isSystemDotIndexes())
- return Status(ErrorCodes::CannotCreateIndex,
- "cannot have an index on the system.indexes collection");
-
if (nss.isOplog())
return Status(ErrorCodes::CannotCreateIndex, "cannot have an index on the oplog");
@@ -963,13 +959,6 @@ private:
} // namespace
Status IndexCatalogImpl::_dropIndex(OperationContext* opCtx, IndexCatalogEntry* entry) {
- /**
- * IndexState in order
- * <db>.system.indexes
- * NamespaceDetails
- * <db>.system.ns
- */
-
// ----- SANITY CHECKS -------------
if (!entry)
return Status(ErrorCodes::BadValue, "IndexCatalog::_dropIndex passed NULL");
diff --git a/src/mongo/db/catalog/index_catalog_impl.h b/src/mongo/db/catalog/index_catalog_impl.h
index ef42f1b8cfc..f6b83145304 100644
--- a/src/mongo/db/catalog/index_catalog_impl.h
+++ b/src/mongo/db/catalog/index_catalog_impl.h
@@ -269,12 +269,10 @@ public:
/**
* disk creation order
- * 1) system.indexes entry
- * 2) collection's NamespaceDetails
+ * 1) collection's NamespaceDetails
* a) info + head
* b) _indexBuildsInProgress++
- * 3) indexes entry in .ns file
- * 4) system.namespaces entry for index ns
+ * 2) indexes entry in .ns file
*/
class IndexBuildBlock {
MONGO_DISALLOW_COPYING(IndexBuildBlock);
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 98c0e5ce7d0..e42aabc4f7b 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -139,8 +139,6 @@ struct Cloner::Fun {
: lastLog(0), opCtx(opCtx), _dbName(dbName) {}
void operator()(DBClientCursorBatchIterator& i) {
- invariant(from_collection.coll() != "system.indexes");
-
// XXX: can probably take dblock instead
unique_ptr<Lock::GlobalWrite> globalWriteLock(new Lock::GlobalWrite(opCtx));
uassert(
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index af8d26eb518..2a3da6149d7 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -128,9 +128,8 @@ public:
// In order to clone a namespace, a user must be allowed to both create and write to that
// namespace. There exist namespaces that are legal to create but not write to (e.g.
- // system.profile), and there exist namespaces that are legal to write to but not create
- // (e.g. system.indexes), so we must check that it is legal to both create and write to the
- // namespace.
+ // system.profile), and there exist namespaces that are legal to write to but not create,
+ // so we must check that it is legal to both create and write to the namespace.
auto allowedCreateStatus = userAllowedCreateNS(dbname, nsToCollectionSubstring(ns));
uassertStatusOK(allowedCreateStatus);
auto allowedWriteStatus = userAllowedWriteNS(dbname, nsToCollectionSubstring(ns));
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index bffe66dd8c0..bfb9c33fd60 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -107,8 +107,7 @@ public:
}
if (nss.isSystem()) {
- // items in system.* cannot be moved as there might be pointers to them
- // i.e. system.indexes entries are pointed to from NamespaceDetails
+ // Items in system.* cannot be moved as there might be pointers to them.
errmsg = "can't compact a system namespace";
return false;
}
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 509bdc1d201..bdd3b2731d9 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -112,7 +112,7 @@ boost::optional<vector<StringData>> _getExactNameMatches(const MatchExpression*
* Uses 'matcher' to determine if the collection's information should be added to 'root'. If so,
* allocates a WorkingSetMember containing information about 'collection', and adds it to 'root'.
*
- * Does not add any information about the system.namespaces collection, or non-existent collections.
+ * Does not add any information about non-existent collections.
*/
void _addWorkingSetMember(OperationContext* opCtx,
const BSONObj& maybe,
@@ -161,16 +161,11 @@ BSONObj buildCollectionBson(OperationContext* opCtx,
const Collection* collection,
bool includePendingDrops,
bool nameOnly) {
-
if (!collection) {
return {};
}
-
auto nss = collection->ns();
auto collectionName = nss.coll();
- if (collectionName == "system.namespaces") {
- return {};
- }
// Drop-pending collections are replicated collections that have been marked for deletion.
// These collections are considered dropped and should not be returned in the results for this
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 9402e24bd17..e49a65124c2 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -99,15 +99,11 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- // Check for the listIndexes ActionType on the database, or find on system.indexes for pre
- // 3.0 systems.
+ // Check for the listIndexes ActionType on the database.
const auto nss = AutoGetCollection::resolveNamespaceStringOrUUID(
opCtx, CommandHelpers::parseNsOrUUID(dbname, cmdObj));
if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(nss),
- ActionType::listIndexes) ||
- authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(dbname, "system.indexes")),
- ActionType::find)) {
+ ActionType::listIndexes)) {
return Status::OK();
}
diff --git a/src/mongo/db/commands/oplog_application_checks.cpp b/src/mongo/db/commands/oplog_application_checks.cpp
index 1a001805f55..873492f5243 100644
--- a/src/mongo/db/commands/oplog_application_checks.cpp
+++ b/src/mongo/db/commands/oplog_application_checks.cpp
@@ -103,7 +103,7 @@ Status OplogApplicationChecks::checkOperationAuthorization(OperationContext* opC
}
if (opType == "i"_sd) {
- return authSession->checkAuthForInsert(opCtx, ns, o);
+ return authSession->checkAuthForInsert(opCtx, ns);
} else if (opType == "u"_sd) {
BSONElement o2Elem = oplogEntry["o2"];
checkBSONType(BSONType::Object, o2Elem);
diff --git a/src/mongo/db/commands/rename_collection_cmd.cpp b/src/mongo/db/commands/rename_collection_cmd.cpp
index 36c8aa4105b..c286ef21e1d 100644
--- a/src/mongo/db/commands/rename_collection_cmd.cpp
+++ b/src/mongo/db/commands/rename_collection_cmd.cpp
@@ -143,11 +143,6 @@ public:
return false;
}
- if (source.isSystemDotIndexes() || target.isSystemDotIndexes()) {
- errmsg = "renaming system.indexes is not allowed";
- return false;
- }
-
if (source.isServerConfigurationCollection()) {
uasserted(ErrorCodes::IllegalOperation,
"renaming the server configuration "
diff --git a/src/mongo/db/commands/write_commands/write_commands_common.cpp b/src/mongo/db/commands/write_commands/write_commands_common.cpp
index 49f72b919fd..3ba6338cd0d 100644
--- a/src/mongo/db/commands/write_commands/write_commands_common.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands_common.cpp
@@ -65,13 +65,6 @@ NamespaceString _getIndexedNss(const std::vector<BSONObj>& documents) {
void fillPrivileges(const write_ops::Insert& op,
std::vector<Privilege>* privileges,
ActionSet* actions) {
- if (op.getNamespace().isSystemDotIndexes()) {
- // Special-case indexes until we have a command
- privileges->push_back(
- Privilege(ResourcePattern::forExactNamespace(_getIndexedNss(op.getDocuments())),
- ActionType::createIndex));
- return;
- }
actions->addAction(ActionType::insert);
}
diff --git a/src/mongo/db/fts/fts_spec.cpp b/src/mongo/db/fts/fts_spec.cpp
index 9022b72bb0b..5225e46106d 100644
--- a/src/mongo/db/fts/fts_spec.cpp
+++ b/src/mongo/db/fts/fts_spec.cpp
@@ -66,10 +66,9 @@ bool validateOverride(const string& override) {
}
FTSSpec::FTSSpec(const BSONObj& indexInfo) {
- // indexInfo is a text index spec. Text index specs pass through fixSpec() before
- // being saved to the system.indexes collection. fixSpec() enforces a schema, such that
- // required fields must exist and be of the correct type (e.g. weights,
- // textIndexVersion).
+ // indexInfo is a text index spec. Text index specs pass through fixSpec() before being
+ // persisted. fixSpec() enforces a schema, such that required fields must exist and be of the
+ // correct type (e.g. weights, textIndexVersion).
massert(16739, "found invalid spec for text index", indexInfo["weights"].isABSONObj());
BSONElement textIndexVersionElt = indexInfo["textIndexVersion"];
massert(17367,
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index e70494335ff..778bd77df4d 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -134,8 +134,6 @@ void IndexBuilder::run() {
NamespaceString ns(_index["ns"].String());
Lock::DBLock dlk(opCtx.get(), ns.db(), MODE_X);
- OldClientContext ctx(opCtx.get(), ns.getSystemIndexesCollection());
-
Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx.get(), ns.db().toString());
Status status = _build(opCtx.get(), db, true, &dlk);
diff --git a/src/mongo/db/index_builder.h b/src/mongo/db/index_builder.h
index 1499b8cb816..835cca474a9 100644
--- a/src/mongo/db/index_builder.h
+++ b/src/mongo/db/index_builder.h
@@ -58,8 +58,7 @@ class OperationContext;
* parent thread, waitForBgIndexStarting() must be called by the same parent thread,
* before any other thread calls go() on any other IndexBuilder instance. This is
* ensured by the replication system, since commands are effectively run single-threaded
- * by the replication applier, and index builds are treated as commands even though they look
- * like inserts on system.indexes.
+ * by the replication applier.
* The argument "relaxConstraints" specifies whether we should honor or ignore index constraints,
* The ignoring of constraints is for replication due to idempotency reasons.
* The argument "initIndexTs" specifies the timestamp to be used to make the initial catalog write.
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 376d05e5e74..cec89996944 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -86,13 +86,12 @@ void checkNS(OperationContext* opCtx, const std::list<std::string>& nsToCheck) {
WriteUnitOfWork wunit(opCtx);
vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(opCtx);
- // The indexes have now been removed from system.indexes, so the only record is
- // in-memory. If there is a journal commit between now and when insert() rewrites
- // the entry and the db crashes before the new system.indexes entry is journalled,
- // the index will be lost forever. Thus, we must stay in the same WriteUnitOfWork
- // to ensure that no journaling will happen between now and the entry being
- // re-written in MultiIndexBlock::init(). The actual index building is done outside
- // of this WUOW.
+ // The indexes have now been removed from persisted memory, so the only record is
+ // in-memory. If there is a journal commit between now and when 'indexer.init' rewrites
+ // the entry and the db crashes before the new persisted index state is journalled, the
+ // index will be lost forever. Thus, we must stay in the same WriteUnitOfWork to ensure
+ // that no journaling will happen between now and the entry being re-written in
+ // MultiIndexBlock::init(). The actual index building is done outside of this WUOW.
if (indexesToBuild.empty()) {
continue;
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index f6ef0ce8ca6..86459120bfe 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -228,9 +228,6 @@ public:
bool isLocal() const {
return db() == kLocalDb;
}
- bool isSystemDotIndexes() const {
- return coll() == "system.indexes";
- }
bool isSystemDotProfile() const {
return coll() == "system.profile";
}
@@ -354,10 +351,6 @@ public:
*/
std::string getSisterNS(StringData local) const;
- std::string getSystemIndexesCollection() const {
- return db().toString() + ".system.indexes";
- }
-
NamespaceString getCommandNS() const {
return {db(), "$cmd"};
}
@@ -426,7 +419,7 @@ public:
* samples:
* good:
* foo
- * system.indexes
+ * system.views
* bad:
* $foo
* @param coll - a collection name component of a namespace
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 4fa45882eec..a15226186b6 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -325,48 +325,30 @@ void OpObserverImpl::onCreateIndex(OperationContext* opCtx,
OptionalCollectionUUID uuid,
BSONObj indexDoc,
bool fromMigrate) {
- const NamespaceString systemIndexes{nss.getSystemIndexesCollection()};
+ // TODO: uuid should no longer be optional (SERVER-36472).
+ invariant(uuid);
- if (uuid) {
- BSONObjBuilder builder;
- builder.append("createIndexes", nss.coll());
-
- for (const auto& e : indexDoc) {
- if (e.fieldNameStringData() != "ns"_sd)
- builder.append(e);
- }
+ BSONObjBuilder builder;
+ builder.append("createIndexes", nss.coll());
- logOperation(opCtx,
- "c",
- nss.getCommandNS(),
- uuid,
- builder.done(),
- nullptr,
- fromMigrate,
- getWallClockTimeForOpLog(opCtx),
- {},
- kUninitializedStmtId,
- {},
- false /* prepare */,
- OplogSlot());
- } else {
- logOperation(opCtx,
- "i",
- systemIndexes,
- {},
- indexDoc,
- nullptr,
- fromMigrate,
- getWallClockTimeForOpLog(opCtx),
- {},
- kUninitializedStmtId,
- {},
- false /* prepare */,
- OplogSlot());
+ for (const auto& e : indexDoc) {
+ if (e.fieldNameStringData() != "ns"_sd)
+ builder.append(e);
}
- AuthorizationManager::get(opCtx->getServiceContext())
- ->logOp(opCtx, "i", systemIndexes, indexDoc, nullptr);
+ logOperation(opCtx,
+ "c",
+ nss.getCommandNS(),
+ uuid,
+ builder.done(),
+ nullptr,
+ fromMigrate,
+ getWallClockTimeForOpLog(opCtx),
+ {},
+ kUninitializedStmtId,
+ {},
+ false /* prepare */,
+ OplogSlot());
}
void OpObserverImpl::onInserts(OperationContext* opCtx,
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index af50aca6415..8ab64d1dcd9 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -189,9 +189,6 @@ Status userAllowedWriteNS(StringData db, StringData coll) {
return Status(ErrorCodes::InvalidNamespace,
str::stream() << "cannot write to '" << db << ".system.profile'");
}
- if (coll == "system.indexes") {
- return Status::OK();
- }
return userAllowedCreateNS(db, coll);
}
@@ -223,7 +220,6 @@ Status userAllowedCreateNS(StringData db, StringData coll) {
if (db == "system")
return Status(ErrorCodes::InvalidNamespace, "cannot use 'system' database");
-
if (coll.startsWith("system.")) {
if (coll == "system.js")
return Status::OK();
diff --git a/src/mongo/db/ops/write_ops.h b/src/mongo/db/ops/write_ops.h
index c3201f7f56f..081e23876b6 100644
--- a/src/mongo/db/ops/write_ops.h
+++ b/src/mongo/db/ops/write_ops.h
@@ -71,12 +71,6 @@ int32_t getStmtIdForWriteAt(const T& op, size_t writePos) {
return getStmtIdForWriteAt(op.getWriteCommandBase(), writePos);
}
-/**
- * Must only be called if the insert is for the "system.indexes" namespace. Returns the actual
- * namespace for which the index is being created.
- */
-NamespaceString extractIndexedNamespace(const Insert& insertOp);
-
// TODO: Delete this getter once IDL supports defaults for object and array fields
template <class T>
const BSONObj& collationOf(const T& opEntry) {
diff --git a/src/mongo/db/ops/write_ops_document_stream_integration_test.cpp b/src/mongo/db/ops/write_ops_document_stream_integration_test.cpp
index 51cbf103722..cb0c92c0536 100644
--- a/src/mongo/db/ops/write_ops_document_stream_integration_test.cpp
+++ b/src/mongo/db/ops/write_ops_document_stream_integration_test.cpp
@@ -66,39 +66,4 @@ TEST(WriteOpsDocSeq, InsertDocStreamWorks) {
ASSERT_EQ(conn->count(ns.ns()), 5u);
}
-TEST(WriteOpsDocSeq, InsertDocStreamWorksWithSystemDotIndexes) {
- std::string errMsg;
- auto conn = std::unique_ptr<DBClientBase>(
- unittest::getFixtureConnectionString().connect("integration_test", errMsg));
- uassert(ErrorCodes::SocketException, errMsg, conn);
-
- NamespaceString ns("test", "doc_seq");
- conn->dropCollection(ns.ns());
- ASSERT_EQ(conn->count(ns.ns()), 0u);
-
- OpMsgRequest request;
- request.body = BSON("insert"
- << "system.indexes"
- << "$db"
- << ns.db());
- request.sequences = {{"documents",
- {
- BSON("ns" << ns.ns() << "key" << BSON("x" << 1) << "name"
- << "my_special_index"),
- }}};
-
- const auto reply = conn->runCommand(std::move(request));
- ASSERT_EQ(int(reply->getProtocol()), int(rpc::Protocol::kOpMsg));
- auto body = reply->getCommandReply();
- ASSERT_OK(getStatusFromCommandResult(body));
- ASSERT_EQ(body["n"].Int(), 1);
-
- auto indexes = conn->getIndexSpecs(ns.ns());
- ASSERT_EQ(indexes.size(), 2u); // my_special_index + _id
-
- indexes.sort([](auto&& l, auto&& r) { return l["name"].String() < r["name"].String(); });
- ASSERT_EQ(indexes.front()["name"].String(), "_id_");
- ASSERT_EQ(indexes.back()["name"].String(), "my_special_index");
-}
-
} // namespace mongo
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 6e46309190b..d9dc53085b4 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -293,32 +293,6 @@ SingleWriteResult createIndex(OperationContext* opCtx,
return result;
}
-WriteResult performCreateIndexes(OperationContext* opCtx, const write_ops::Insert& wholeOp) {
- // Currently this creates each index independently. We could pass multiple indexes to
- // createIndexes, but there is a lot of complexity involved in doing it correctly. For one
- // thing, createIndexes only takes indexes to a single collection, but this batch could include
- // different collections. Additionally, the error handling is different: createIndexes is
- // all-or-nothing while inserts are supposed to behave like a sequence that either skips over
- // errors or stops at the first one. These could theoretically be worked around, but it doesn't
- // seem worth it since users that want faster index builds should just use the createIndexes
- // command rather than a legacy emulation.
- LastOpFixer lastOpFixer(opCtx, wholeOp.getNamespace());
- WriteResult out;
- for (auto&& spec : wholeOp.getDocuments()) {
- try {
- lastOpFixer.startingOp();
- out.results.emplace_back(createIndex(opCtx, wholeOp.getNamespace(), spec));
- lastOpFixer.finishedOpSuccessfully();
- } catch (const DBException& ex) {
- const bool canContinue =
- handleError(opCtx, ex, wholeOp.getNamespace(), wholeOp.getWriteCommandBase(), &out);
- if (!canContinue)
- break;
- }
- }
- return out;
-}
-
void insertDocuments(OperationContext* opCtx,
Collection* collection,
std::vector<InsertStatement>::iterator begin,
@@ -511,10 +485,6 @@ WriteResult performInserts(OperationContext* opCtx,
uassertStatusOK(userAllowedWriteNS(wholeOp.getNamespace()));
- if (wholeOp.getNamespace().isSystemDotIndexes()) {
- return performCreateIndexes(opCtx, wholeOp);
- }
-
DisableDocumentValidationIfTrue docValidationDisabler(
opCtx, wholeOp.getWriteCommandBase().getBypassDocumentValidation());
LastOpFixer lastOpFixer(opCtx, wholeOp.getNamespace());
diff --git a/src/mongo/db/ops/write_ops_parsers.cpp b/src/mongo/db/ops/write_ops_parsers.cpp
index ee98d43d17f..7abb058dd32 100644
--- a/src/mongo/db/ops/write_ops_parsers.cpp
+++ b/src/mongo/db/ops/write_ops_parsers.cpp
@@ -65,26 +65,7 @@ void checkOpCountForCommand(const T& op, size_t numOps) {
}
void validateInsertOp(const write_ops::Insert& insertOp) {
- const auto& nss = insertOp.getNamespace();
const auto& docs = insertOp.getDocuments();
-
- if (nss.isSystemDotIndexes()) {
- // This is only for consistency with sharding.
- uassert(ErrorCodes::InvalidLength,
- "Insert commands to system.indexes are limited to a single insert",
- docs.size() == 1);
-
- const auto indexedNss(extractIndexedNamespace(insertOp));
-
- uassert(ErrorCodes::InvalidNamespace,
- str::stream() << indexedNss.ns() << " is not a valid namespace to index",
- indexedNss.isValid());
-
- uassert(ErrorCodes::IllegalOperation,
- str::stream() << indexedNss.ns() << " is not in the target database " << nss.db(),
- nss.db().compare(indexedNss.db()) == 0);
- }
-
checkOpCountForCommand(insertOp, docs.size());
}
@@ -119,15 +100,6 @@ int32_t getStmtIdForWriteAt(const WriteCommandBase& writeCommandBase, size_t wri
return kFirstStmtId + writePos;
}
-NamespaceString extractIndexedNamespace(const Insert& insertOp) {
- invariant(insertOp.getNamespace().isSystemDotIndexes());
-
- const auto& documents = insertOp.getDocuments();
- invariant(documents.size() == 1);
-
- return NamespaceString(documents.at(0)["ns"].str());
-}
-
} // namespace write_ops
write_ops::Insert InsertOp::parse(const OpMsgRequest& request) {
diff --git a/src/mongo/db/pipeline/document_source_change_stream_test.cpp b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
index f5897bd30af..c8a703726cb 100644
--- a/src/mongo/db/pipeline/document_source_change_stream_test.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
@@ -983,34 +983,6 @@ TEST_F(ChangeStreamStageTest, MatchFiltersNoOp) {
checkTransformation(noOp, boost::none);
}
-TEST_F(ChangeStreamStageTest, MatchFiltersCreateIndex) {
- auto indexSpec = D{{"v", 2}, {"key", D{{"a", 1}}}, {"name", "a_1"_sd}, {"ns", nss.ns()}};
- NamespaceString indexNs(nss.getSystemIndexesCollection());
- bool fromMigrate = false; // At the moment this makes no difference.
- auto createIndex = makeOplogEntry(OpTypeEnum::kInsert, // op type
- indexNs, // namespace
- indexSpec.toBson(), // o
- boost::none, // uuid
- fromMigrate, // fromMigrate
- boost::none); // o2
-
- checkTransformation(createIndex, boost::none);
-}
-
-TEST_F(ChangeStreamStageTest, MatchFiltersCreateIndexFromMigrate) {
- auto indexSpec = D{{"v", 2}, {"key", D{{"a", 1}}}, {"name", "a_1"_sd}, {"ns", nss.ns()}};
- NamespaceString indexNs(nss.getSystemIndexesCollection());
- bool fromMigrate = true;
- auto createIndex = makeOplogEntry(OpTypeEnum::kInsert, // op type
- indexNs, // namespace
- indexSpec.toBson(), // o
- boost::none, // uuid
- fromMigrate, // fromMigrate
- boost::none); // o2
-
- checkTransformation(createIndex, boost::none);
-}
-
TEST_F(ChangeStreamStageTest, TransformationShouldBeAbleToReParseSerializedStage) {
auto expCtx = getExpCtx();
@@ -1559,13 +1531,6 @@ TEST_F(ChangeStreamStageDBTest, MatchFiltersNoOp) {
checkTransformation(noOp, boost::none);
}
-TEST_F(ChangeStreamStageDBTest, MatchFiltersCreateIndex) {
- auto indexSpec = D{{"v", 2}, {"key", D{{"a", 1}}}, {"name", "a_1"_sd}, {"ns", nss.ns()}};
- NamespaceString indexNs(nss.getSystemIndexesCollection());
- OplogEntry createIndex = makeOplogEntry(OpTypeEnum::kInsert, indexNs, indexSpec.toBson());
- checkTransformation(createIndex, boost::none);
-}
-
TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldIncludeShardKeyFromResumeToken) {
const Timestamp ts(3, 45);
const long long term = 4;
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index eb8a48c2cad..567b551c488 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -146,33 +146,6 @@ Status ensureAllCollectionsHaveUUIDs(OperationContext* opCtx,
invariant(db);
for (auto collectionIt = db->begin(); collectionIt != db->end(); ++collectionIt) {
Collection* coll = *collectionIt;
- // The presence of system.indexes or system.namespaces on wiredTiger may
- // have undesirable results (see SERVER-32894, SERVER-34482). It is okay to
- // drop these collections on wiredTiger because users are not permitted to
- // store data in them.
- if (coll->ns().coll() == "system.indexes" || coll->ns().coll() == "system.namespaces") {
- const auto nssToDrop = coll->ns();
- LOG(1) << "Attempting to drop invalid system collection " << nssToDrop;
- if (coll->numRecords(opCtx)) {
- severe(LogComponent::kControl) << "Cannot drop non-empty collection "
- << nssToDrop.ns();
- exitCleanly(EXIT_NEED_DOWNGRADE);
- }
- repl::UnreplicatedWritesBlock uwb(opCtx);
- writeConflictRetry(opCtx, "dropSystemIndexes", nssToDrop.ns(), [&] {
- WriteUnitOfWork wunit(opCtx);
- BSONObjBuilder unusedResult;
- fassert(50837,
- dropCollection(
- opCtx,
- nssToDrop,
- unusedResult,
- {},
- DropCollectionSystemCollectionMode::kAllowSystemCollectionDrops));
- wunit.commit();
- });
- continue;
- }
// We expect all collections to have UUIDs in MongoDB 4.2
if (!coll->uuid()) {
@@ -483,29 +456,6 @@ StatusWith<bool> repairDatabasesAndCheckVersion(OperationContext* opCtx) {
}
}
- // Major versions match, check indexes
- const NamespaceString systemIndexes(db->name(), "system.indexes");
-
- Collection* coll = db->getCollection(opCtx, systemIndexes);
- auto exec = InternalPlanner::collectionScan(
- opCtx, systemIndexes.ns(), coll, PlanExecutor::NO_YIELD);
-
- BSONObj index;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL))) {
- if (index["v"].isNumber() && index["v"].numberInt() == 0) {
- log() << "WARNING: The index: " << index << " was created with the deprecated"
- << " v:0 format. This format will not be supported in a future release."
- << startupWarningsLog;
- log() << "\t To fix this, you need to rebuild this index."
- << " For instructions, see http://dochub.mongodb.org/core/rebuild-v0-indexes"
- << startupWarningsLog;
- }
- }
-
- // Non-yielding collection scans from InternalPlanner will never error.
- invariant(PlanExecutor::IS_EOF == state);
-
if (replSettings.usingReplSets()) {
// We only care about _id indexes and drop-pending collections if we are in a replset.
checkForIdIndexesAndDropPendingCollections(opCtx, db);
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 0b4262b3e85..8f3f25b8514 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -70,14 +70,7 @@ MONGO_FAIL_POINT_DEFINE(applyOpsPauseBetweenOperations);
*/
bool _parseAreOpsCrudOnly(const BSONObj& applyOpCmd) {
for (const auto& elem : applyOpCmd.firstElement().Obj()) {
- const char* names[] = {"ns", "op"};
- BSONElement fields[2];
- elem.Obj().getFields(2, names, fields);
- BSONElement& fieldNs = fields[0];
- BSONElement& fieldOp = fields[1];
-
- const char* opType = fieldOp.valuestrsafe();
- const StringData ns = fieldNs.valuestrsafe();
+ const char* opType = elem.Obj().getField("op").valuestrsafe();
// All atomic ops have an opType of length 1.
if (opType[0] == '\0' || opType[1] != '\0')
@@ -90,8 +83,7 @@ bool _parseAreOpsCrudOnly(const BSONObj& applyOpCmd) {
case 'u':
break;
case 'i':
- if (nsToCollectionSubstring(ns) != "system.indexes")
- break;
+ break;
// Fallthrough.
default:
return false;
@@ -152,7 +144,7 @@ Status _applyOps(OperationContext* opCtx,
// NamespaceNotFound.
// Additionally for inserts, we fail early on non-existent collections.
auto collection = db->getCollection(opCtx, nss);
- if (!collection && !nss.isSystemDotIndexes() && (*opType == 'i' || *opType == 'u')) {
+ if (!collection && (*opType == 'i' || *opType == 'u')) {
uasserted(
ErrorCodes::AtomicityFailure,
str::stream()
@@ -210,7 +202,7 @@ Status _applyOps(OperationContext* opCtx,
}
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- if (!autoColl.getCollection() && !nss.isSystemDotIndexes()) {
+ if (!autoColl.getCollection()) {
// For idempotency reasons, return success on delete operations.
if (*opType == 'd') {
return Status::OK();
@@ -226,54 +218,12 @@ Status _applyOps(OperationContext* opCtx,
OldClientContext ctx(opCtx, nss.ns());
- if (!nss.isSystemDotIndexes()) {
- // We return the status rather than merely aborting so failure of CRUD
- // ops doesn't stop the applyOps from trying to process the rest of the
- // ops. This is to leave the door open to parallelizing CRUD op
- // application in the future.
- return repl::applyOperation_inlock(
- opCtx, ctx.db(), opObj, alwaysUpsert, oplogApplicationMode);
- }
-
- auto fieldO = opObj["o"];
- BSONObj indexSpec;
- NamespaceString indexNss;
- std::tie(indexSpec, indexNss) =
- repl::prepForApplyOpsIndexInsert(fieldO, opObj, nss);
- if (!indexSpec["collation"]) {
- // If the index spec does not include a collation, explicitly specify
- // the simple collation, so the index does not inherit the collection
- // default collation.
- auto indexVersion = indexSpec["v"];
- // The index version is populated by prepForApplyOpsIndexInsert().
- invariant(indexVersion);
- if (indexVersion.isNumber() &&
- (indexVersion.numberInt() >=
- static_cast<int>(IndexDescriptor::IndexVersion::kV2))) {
- BSONObjBuilder bob;
- bob.append("collation", CollationSpec::kSimpleSpec);
- bob.appendElements(indexSpec);
- indexSpec = bob.obj();
- }
- }
- BSONObjBuilder command;
- command.append("createIndexes", indexNss.coll());
- {
- BSONArrayBuilder indexes(command.subarrayStart("indexes"));
- indexes.append(indexSpec);
- indexes.doneFast();
- }
- const BSONObj commandObj = command.done();
-
- DBDirectClient client(opCtx);
- BSONObj infoObj;
- client.runCommand(nss.db().toString(), commandObj, infoObj);
-
- // Uassert to stop applyOps only when building indexes, but not for CRUD
- // ops.
- uassertStatusOK(getStatusFromCommandResult(infoObj));
-
- return Status::OK();
+ // We return the status rather than merely aborting so failure of CRUD
+ // ops doesn't stop the applyOps from trying to process the rest of the
+ // ops. This is to leave the door open to parallelizing CRUD op
+ // application in the future.
+ return repl::applyOperation_inlock(
+ opCtx, ctx.db(), opObj, alwaysUpsert, oplogApplicationMode);
});
} catch (const DBException& ex) {
ab.append(false);
diff --git a/src/mongo/db/repl/do_txn.cpp b/src/mongo/db/repl/do_txn.cpp
index 62537c8c8b8..b7e91fdd030 100644
--- a/src/mongo/db/repl/do_txn.cpp
+++ b/src/mongo/db/repl/do_txn.cpp
@@ -70,14 +70,7 @@ MONGO_FAIL_POINT_DEFINE(doTxnPauseBetweenOperations);
*/
bool _areOpsCrudOnly(const BSONObj& doTxnCmd) {
for (const auto& elem : doTxnCmd.firstElement().Obj()) {
- const char* names[] = {"ns", "op"};
- BSONElement fields[2];
- elem.Obj().getFields(2, names, fields);
- BSONElement& fieldNs = fields[0];
- BSONElement& fieldOp = fields[1];
-
- const char* opType = fieldOp.valuestrsafe();
- const StringData ns = fieldNs.valuestrsafe();
+ const char* opType = elem.Obj().getField("op").valuestrsafe();
// All atomic ops have an opType of length 1.
if (opType[0] == '\0' || opType[1] != '\0')
@@ -89,8 +82,7 @@ bool _areOpsCrudOnly(const BSONObj& doTxnCmd) {
case 'u':
break;
case 'i':
- if (nsToCollectionSubstring(ns) != "system.indexes")
- break;
+ break;
// Fallthrough.
default:
return false;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 5e9890bc896..5ea63255c23 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1024,45 +1024,6 @@ StatusWith<OplogApplication::Mode> OplogApplication::parseMode(const std::string
MONGO_UNREACHABLE;
}
-std::pair<BSONObj, NamespaceString> prepForApplyOpsIndexInsert(const BSONElement& fieldO,
- const BSONObj& op,
- const NamespaceString& requestNss) {
- uassert(ErrorCodes::NoSuchKey,
- str::stream() << "Missing expected index spec in field 'o': " << op,
- !fieldO.eoo());
- uassert(ErrorCodes::TypeMismatch,
- str::stream() << "Expected object for index spec in field 'o': " << op,
- fieldO.isABSONObj());
- BSONObj indexSpec = fieldO.embeddedObject();
-
- std::string indexNs;
- uassertStatusOK(bsonExtractStringField(indexSpec, "ns", &indexNs));
- const NamespaceString indexNss(indexNs);
- uassert(ErrorCodes::InvalidNamespace,
- str::stream() << "Invalid namespace in index spec: " << op,
- indexNss.isValid());
- uassert(ErrorCodes::InvalidNamespace,
- str::stream() << "Database name mismatch for database (" << requestNss.db()
- << ") while creating index: "
- << op,
- requestNss.db() == indexNss.db());
-
- if (!indexSpec["v"]) {
- // If the "v" field isn't present in the index specification, then we assume it is a
- // v=1 index from an older version of MongoDB. This is because
- // (1) we haven't built v=0 indexes as the default for a long time, and
- // (2) the index version has been included in the corresponding oplog entry since
- // v=2 indexes were introduced.
- BSONObjBuilder bob;
-
- bob.append("v", static_cast<int>(IndexVersion::kV1));
- bob.appendElements(indexSpec);
-
- indexSpec = bob.obj();
- }
-
- return std::make_pair(indexSpec, indexNss);
-}
// @return failure status if an update should have happened and the document DNE.
// See replset initial sync code.
Status applyOperation_inlock(OperationContext* opCtx,
@@ -1222,14 +1183,6 @@ Status applyOperation_inlock(OperationContext* opCtx,
str::stream() << "Oplog entry did not have 'ts' field when expected: " << redact(op));
if (*opType == 'i') {
- if (requestNss.isSystemDotIndexes()) {
- BSONObj indexSpec;
- NamespaceString indexNss;
- std::tie(indexSpec, indexNss) =
- repl::prepForApplyOpsIndexInsert(fieldO, op, requestNss);
- createIndexForApplyOps(opCtx, indexSpec, indexNss, incrementOpsAppliedStats, mode);
- return Status::OK();
- }
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply insert due to missing collection: "
<< op.toString(),
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index 565ab7f31cb..f64a0d0dcb6 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -164,15 +164,6 @@ void acquireOplogCollectionForLogging(OperationContext* opCtx);
void establishOplogCollectionForLogging(OperationContext* opCtx, Collection* oplog);
using IncrementOpsAppliedStatsFn = stdx::function<void()>;
-/**
- * Take the object field of a BSONObj, the BSONObj, and the namespace of
- * the operation and perform necessary validation to ensure the BSONObj is a
- * properly-formed command to insert into system.indexes. This is only to
- * be used for insert operations into system.indexes. It is called via applyOps.
- */
-std::pair<BSONObj, NamespaceString> prepForApplyOpsIndexInsert(const BSONElement& fieldO,
- const BSONObj& op,
- const NamespaceString& requestNss);
/**
* This class represents the different modes of oplog application that are used within the
diff --git a/src/mongo/db/repl/replication_coordinator.cpp b/src/mongo/db/repl/replication_coordinator.cpp
index 288cabf18a6..72954386923 100644
--- a/src/mongo/db/repl/replication_coordinator.cpp
+++ b/src/mongo/db/repl/replication_coordinator.cpp
@@ -86,11 +86,6 @@ bool ReplicationCoordinator::isOplogDisabledFor(OperationContext* opCtx,
return true;
}
- // <db>.system.namespaces is a MMAP-only collection and is not replicated.
- if (nss.coll() == "system.namespaces"_sd) {
- return true;
- }
-
fassert(28626, opCtx->recoveryUnit());
return false;
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 9a681f74d88..c14d6a9c00b 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -680,7 +680,7 @@ TEST_F(StorageInterfaceImplTest,
TEST_F(StorageInterfaceImplTest, CreateCollectionThatAlreadyExistsFails) {
auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
- NamespaceString nss("test.system.indexes");
+ NamespaceString nss("test.foo");
createCollection(opCtx, nss);
const CollectionOptions opts = generateOptionsWithUuid();
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 5e7c7b57742..ae3717efbca 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -271,10 +271,6 @@ Status SyncTail::syncApply(OperationContext* opCtx,
Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
OldClientContext ctx(opCtx, nss.ns());
return applyOp(ctx.db());
- } else if (opType == OpTypeEnum::kInsert && nss.isSystemDotIndexes()) {
- Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
- OldClientContext ctx(opCtx, nss.ns());
- return applyOp(ctx.db());
} else if (OplogEntry::isCrudOpType(opType)) {
return writeConflictRetry(opCtx, "syncApply_CRUD", nss.ns(), [&] {
// Need to throw instead of returning a status for it to be properly ignored.
diff --git a/src/mongo/db/s/operation_sharding_state.cpp b/src/mongo/db/s/operation_sharding_state.cpp
index b4cc2457622..88588db7ce6 100644
--- a/src/mongo/db/s/operation_sharding_state.cpp
+++ b/src/mongo/db/s/operation_sharding_state.cpp
@@ -79,13 +79,7 @@ void OperationShardingState::initializeClientRoutingVersions(NamespaceString nss
const auto shardVersionElem = cmdObj.getField(ChunkVersion::kShardVersionField);
if (!shardVersionElem.eoo()) {
- auto shardVersion = uassertStatusOK(ChunkVersion::parseFromCommand(cmdObj));
-
- if (nss.isSystemDotIndexes()) {
- _shardVersions[nss.ns()] = ChunkVersion::IGNORED();
- } else {
- _shardVersions[nss.ns()] = std::move(shardVersion);
- }
+ _shardVersions[nss.ns()] = uassertStatusOK(ChunkVersion::parseFromCommand(cmdObj));
}
const auto dbVersionElem = cmdObj.getField(kDbVersionField);
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 7710cd8a797..adfb9176291 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -1118,7 +1118,7 @@ void receivedInsert(OperationContext* opCtx, const NamespaceString& nsString, co
for (const auto& obj : insertOp.getDocuments()) {
Status status =
- AuthorizationSession::get(opCtx->getClient())->checkAuthForInsert(opCtx, nsString, obj);
+ AuthorizationSession::get(opCtx->getClient())->checkAuthForInsert(opCtx, nsString);
audit::logInsertAuthzCheck(opCtx->getClient(), nsString, obj, status.code());
uassertStatusOK(status);
}