path: root/src/mongo/s
author    Dianna Hohensee <dianna.hohensee@10gen.com>    2018-08-27 13:06:38 -0400
committer Dianna Hohensee <dianna.hohensee@10gen.com>    2018-08-29 16:47:09 -0400
commit    4cb0742947dabee476c9979cae39c728a21568d5 (patch)
tree      8ec6baacaab48a11b20eae4781f32be1bdad6ec2 /src/mongo/s
parent    aa36a9e8ad8c98e828f1b53966672b368d973380 (diff)
SERVER-36015 Remove references to system.namespaces and system.indexes
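
Background for the change above: the legacy write protocol could express index creation as an insert into "<db>.system.indexes", with the target collection named by the spec's "ns" field (the format shown in the removed IndexInsertCloneWithIds test further down). As a minimal sketch, not part of this commit and with a hypothetical helper name, such a legacy spec maps onto the supported createIndexes command roughly as follows:

// Hypothetical sketch: rewrite a legacy system.indexes insert document as a
// createIndexes command. The legacy "ns" field names the target collection and
// is dropped from the command form.
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"

namespace mongo {
BSONObj makeCreateIndexesCmd(const BSONObj& legacySpec) {
    const NamespaceString targetNss(legacySpec["ns"].str());
    return BSON("createIndexes" << targetNss.coll() << "indexes"
                                << BSON_ARRAY(legacySpec.removeField("ns")));
}
}  // namespace mongo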
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test.cpp          | 55
-rw-r--r--  src/mongo/s/commands/cluster_write_cmd.cpp             |  2
-rw-r--r--  src/mongo/s/commands/commands_public.cpp               |  8
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp             |  2
-rw-r--r--  src/mongo/s/write_ops/batched_command_request.cpp      | 23
-rw-r--r--  src/mongo/s/write_ops/batched_command_request.h        |  6
-rw-r--r--  src/mongo/s/write_ops/batched_command_request_test.cpp | 28
-rw-r--r--  src/mongo/s/write_ops/cluster_write.cpp                | 15
-rw-r--r--  src/mongo/s/write_ops/write_op.cpp                     | 10
9 files changed, 33 insertions, 116 deletions
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 79036d4f824..fba1b651576 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -762,29 +762,21 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
CollectionType coll1;
- coll1.setNs(NamespaceString{"test.system.indexes"});
+ coll1.setNs(NamespaceString{"test.coll1"});
coll1.setUpdatedAt(network()->now());
- coll1.setUnique(true);
+ coll1.setUnique(false);
coll1.setEpoch(OID::gen());
coll1.setKeyPattern(KeyPattern{BSON("_id" << 1)});
ASSERT_OK(coll1.validate());
CollectionType coll2;
- coll2.setNs(NamespaceString{"test.coll1"});
+ coll2.setNs(NamespaceString{"anotherdb.coll1"});
coll2.setUpdatedAt(network()->now());
coll2.setUnique(false);
coll2.setEpoch(OID::gen());
coll2.setKeyPattern(KeyPattern{BSON("_id" << 1)});
ASSERT_OK(coll2.validate());
- CollectionType coll3;
- coll3.setNs(NamespaceString{"anotherdb.coll1"});
- coll3.setUpdatedAt(network()->now());
- coll3.setUnique(false);
- coll3.setEpoch(OID::gen());
- coll3.setKeyPattern(KeyPattern{BSON("_id" << 1)});
- ASSERT_OK(coll3.validate());
-
const OpTime newOpTime(Timestamp(7, 6), 5);
auto future = launchAsync([this, newOpTime] {
@@ -798,49 +790,46 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
return std::move(collections);
});
- onFindWithMetadataCommand(
- [this, coll1, coll2, coll3, newOpTime](const RemoteCommandRequest& request) {
- ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
- rpc::TrackingMetadata::removeTrackingData(request.metadata));
+ onFindWithMetadataCommand([this, coll1, coll2, newOpTime](const RemoteCommandRequest& request) {
+ ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
+ rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, CollectionType::ConfigNS);
+ const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
+ ASSERT_EQ(nss, CollectionType::ConfigNS);
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
- ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
- ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
+ ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
+ ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
- checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
+ checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
- ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
- BSONObjBuilder builder;
- metadata.writeToMetadata(&builder).transitional_ignore();
+ ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
+ BSONObjBuilder builder;
+ metadata.writeToMetadata(&builder).transitional_ignore();
- return std::make_tuple(vector<BSONObj>{coll1.toBSON(), coll2.toBSON(), coll3.toBSON()},
- builder.obj());
- });
+ return std::make_tuple(vector<BSONObj>{coll1.toBSON(), coll2.toBSON()}, builder.obj());
+ });
const auto& actualColls = future.timed_get(kFutureTimeout);
- ASSERT_EQ(3U, actualColls.size());
+ ASSERT_EQ(2U, actualColls.size());
ASSERT_BSONOBJ_EQ(coll1.toBSON(), actualColls[0].toBSON());
ASSERT_BSONOBJ_EQ(coll2.toBSON(), actualColls[1].toBSON());
- ASSERT_BSONOBJ_EQ(coll3.toBSON(), actualColls[2].toBSON());
}
TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
CollectionType coll1;
- coll1.setNs(NamespaceString{"test.system.indexes"});
+ coll1.setNs(NamespaceString{"test.coll1"});
coll1.setUpdatedAt(network()->now());
coll1.setUnique(true);
coll1.setEpoch(OID::gen());
coll1.setKeyPattern(KeyPattern{BSON("_id" << 1)});
CollectionType coll2;
- coll2.setNs(NamespaceString{"test.coll1"});
+ coll2.setNs(NamespaceString{"test.coll2"});
coll2.setUpdatedAt(network()->now());
coll2.setUnique(false);
coll2.setEpoch(OID::gen());
@@ -890,7 +879,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsInvalidCollectionType) {
});
CollectionType validColl;
- validColl.setNs(NamespaceString{"test.system.indexes"});
+ validColl.setNs(NamespaceString{"test.coll1"});
validColl.setUpdatedAt(network()->now());
validColl.setUnique(true);
validColl.setEpoch(OID::gen());
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index 25ad8bcb48e..9ff50ec8c59 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -149,7 +149,7 @@ private:
std::vector<Strategy::CommandResult>* results) {
// Note that this implementation will not handle targeting retries and does not completely
// emulate write behavior
- ChunkManagerTargeter targeter(targetingBatchItem.getRequest()->getTargetingNS());
+ ChunkManagerTargeter targeter(targetingBatchItem.getRequest()->getNS());
Status status = targeter.init(opCtx);
if (!status.isOK())
return status;
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 96c448988aa..9aee19e9b97 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -475,15 +475,11 @@ public:
const BSONObj& cmdObj) const override {
AuthorizationSession* authzSession = AuthorizationSession::get(client);
- // Check for the listIndexes ActionType on the database, or find on system.indexes for pre
- // 3.0 systems.
+ // Check for the listIndexes ActionType on the database.
const NamespaceString ns(parseNs(dbname, cmdObj));
if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns),
- ActionType::listIndexes) ||
- authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(dbname, "system.indexes")),
- ActionType::find)) {
+ ActionType::listIndexes)) {
return Status::OK();
}
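
The commands_public.cpp hunk above drops the pre-3.0 fallback that accepted ActionType::find on "<db>.system.indexes" as authorization for listIndexes through mongos. As a rough sketch, assuming the Privilege and ResourcePattern types already used in that hunk and a hypothetical helper name, the single privilege that now gates the command looks like this:

// Hypothetical helper, not in the tree: the one privilege that now authorizes
// listIndexes on a collection through mongos; find on "<db>.system.indexes" no
// longer grants access.
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/namespace_string.h"

namespace mongo {
Privilege requiredListIndexesPrivilege(const NamespaceString& nss) {
    return Privilege(ResourcePattern::forExactNamespace(nss), ActionType::listIndexes);
}
}  // namespace mongo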
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 94cd978b401..120d7ca33f8 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -208,7 +208,7 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx,
AsyncRequestsSender ars(opCtx,
Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
- clientRequest.getTargetingNS().db().toString(),
+ clientRequest.getNS().db().toString(),
requests,
kPrimaryOnlyReadPreference,
opCtx->getTxnNumber() ? Shard::RetryPolicy::kIdempotent
diff --git a/src/mongo/s/write_ops/batched_command_request.cpp b/src/mongo/s/write_ops/batched_command_request.cpp
index c500bc201eb..c94f6469aac 100644
--- a/src/mongo/s/write_ops/batched_command_request.cpp
+++ b/src/mongo/s/write_ops/batched_command_request.cpp
@@ -77,25 +77,6 @@ const NamespaceString& BatchedCommandRequest::getNS() const {
return _visit([](auto&& op) -> decltype(auto) { return op.getNamespace(); });
}
-NamespaceString BatchedCommandRequest::getTargetingNS() const {
- if (!isInsertIndexRequest()) {
- return getNS();
- }
-
- const auto& documents = _insertReq->getDocuments();
- invariant(documents.size() == 1);
-
- return NamespaceString(documents.at(0)["ns"].str());
-}
-
-bool BatchedCommandRequest::isInsertIndexRequest() const {
- if (_batchType != BatchedCommandRequest::BatchType_Insert) {
- return false;
- }
-
- return getNS().isSystemDotIndexes();
-}
-
std::size_t BatchedCommandRequest::sizeWriteOps() const {
struct Visitor {
auto operator()(const write_ops::Insert& op) const {
@@ -163,10 +144,6 @@ BatchedCommandRequest BatchedCommandRequest::cloneInsertWithIds(
BatchedCommandRequest newCmdRequest(std::move(origCmdRequest));
- if (newCmdRequest.isInsertIndexRequest()) {
- return newCmdRequest;
- }
-
const auto& origDocs = newCmdRequest._insertReq->getDocuments();
std::vector<BSONObj> newDocs;
diff --git a/src/mongo/s/write_ops/batched_command_request.h b/src/mongo/s/write_ops/batched_command_request.h
index 29812e1b9d0..86b418c3a9d 100644
--- a/src/mongo/s/write_ops/batched_command_request.h
+++ b/src/mongo/s/write_ops/batched_command_request.h
@@ -68,12 +68,6 @@ public:
}
const NamespaceString& getNS() const;
- NamespaceString getTargetingNS() const;
-
- /**
- * Index creation can be expressed as an insert into the 'system.indexes' namespace.
- */
- bool isInsertIndexRequest() const;
const auto& getInsertRequest() const {
invariant(_insertReq);
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index 437655463a0..fdab37989b9 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -104,7 +104,6 @@ TEST(BatchedCommandRequest, InsertCloneWithIds) {
const auto clonedRequest(BatchedCommandRequest::cloneInsertWithIds(std::move(batchedRequest)));
ASSERT_EQ("xyz.abc", clonedRequest.getNS().ns());
- ASSERT_EQ("xyz.abc", clonedRequest.getTargetingNS().ns());
ASSERT(clonedRequest.getWriteCommandBase().getOrdered());
ASSERT(clonedRequest.getWriteCommandBase().getBypassDocumentValidation());
ASSERT_BSONOBJ_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
@@ -119,32 +118,5 @@ TEST(BatchedCommandRequest, InsertCloneWithIds) {
ASSERT_EQ(2, insertDocs[1]["x"].numberLong());
}
-TEST(BatchedCommandRequest, IndexInsertCloneWithIds) {
- const auto indexSpec = BSON("v" << 1 << "key" << BSON("x" << -1) << "name"
- << "Test index"
- << "ns"
- << "xyz.abc");
-
- BatchedCommandRequest batchedRequest([&] {
- write_ops::Insert insertOp(NamespaceString("xyz.system.indexes"));
- insertOp.setDocuments({indexSpec});
- return insertOp;
- }());
- batchedRequest.setWriteConcern(BSON("w" << 2));
-
- const auto clonedRequest(BatchedCommandRequest::cloneInsertWithIds(std::move(batchedRequest)));
-
- ASSERT_EQ("xyz.system.indexes", clonedRequest.getNS().ns());
- ASSERT_EQ("xyz.abc", clonedRequest.getTargetingNS().ns());
- ASSERT(clonedRequest.getWriteCommandBase().getOrdered());
- ASSERT(!clonedRequest.getWriteCommandBase().getBypassDocumentValidation());
- ASSERT_BSONOBJ_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
-
- const auto& insertDocs = clonedRequest.getInsertRequest().getDocuments();
- ASSERT_EQ(1u, insertDocs.size());
-
- ASSERT_BSONOBJ_EQ(indexSpec, insertDocs[0]);
-}
-
} // namespace
} // namespace mongo
diff --git a/src/mongo/s/write_ops/cluster_write.cpp b/src/mongo/s/write_ops/cluster_write.cpp
index 421383d99ca..16159dbca3c 100644
--- a/src/mongo/s/write_ops/cluster_write.cpp
+++ b/src/mongo/s/write_ops/cluster_write.cpp
@@ -75,15 +75,14 @@ void ClusterWriter::write(OperationContext* opCtx,
Grid::get(opCtx)->catalogClient()->writeConfigServerDirect(opCtx, request, response);
} else {
{
- ChunkManagerTargeter targeter(request.getTargetingNS());
+ ChunkManagerTargeter targeter(request.getNS());
Status targetInitStatus = targeter.init(opCtx);
if (!targetInitStatus.isOK()) {
toBatchError(targetInitStatus.withContext(
- str::stream() << "unable to initialize targeter for"
- << (request.isInsertIndexRequest() ? " index" : "")
- << " write op for collection "
- << request.getTargetingNS().ns()),
+ str::stream()
+ << "unable to initialize targeter for write op for collection "
+ << request.getNS().ns()),
response);
return;
}
@@ -91,10 +90,8 @@ void ClusterWriter::write(OperationContext* opCtx,
auto swEndpoints = targeter.targetCollection();
if (!swEndpoints.isOK()) {
toBatchError(swEndpoints.getStatus().withContext(
- str::stream() << "unable to target"
- << (request.isInsertIndexRequest() ? " index" : "")
- << " write op for collection "
- << request.getTargetingNS().ns()),
+ str::stream() << "unable to target write op for collection "
+ << request.getNS().ns()),
response);
return;
}
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index b125f35a970..ec10197376c 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -53,16 +53,8 @@ const WriteErrorDetail& WriteOp::getOpError() const {
Status WriteOp::targetWrites(OperationContext* opCtx,
const NSTargeter& targeter,
std::vector<TargetedWrite*>* targetedWrites) {
- const bool isIndexInsert = _itemRef.getRequest()->isInsertIndexRequest();
-
auto swEndpoints = [&]() -> StatusWith<std::vector<ShardEndpoint>> {
if (_itemRef.getOpType() == BatchedCommandRequest::BatchType_Insert) {
- if (isIndexInsert) {
- // TODO: Remove the index targeting stuff once there is a command for it?
- // TODO: Retry index writes with stale version?
- return targeter.targetCollection();
- }
-
auto swEndpoint = targeter.targetInsert(opCtx, _itemRef.getDocument());
if (!swEndpoint.isOK())
return swEndpoint.getStatus();
@@ -82,7 +74,7 @@ Status WriteOp::targetWrites(OperationContext* opCtx,
//
// NOTE: Index inserts are currently specially targeted only at the current collection to avoid
// creating collections everywhere.
- if (swEndpoints.isOK() && swEndpoints.getValue().size() > 1u && !isIndexInsert) {
+ if (swEndpoints.isOK() && swEndpoints.getValue().size() > 1u) {
swEndpoints = targeter.targetAllShards(opCtx);
}