-rw-r--r--  jstests/core/apply_ops1.js                      11
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp                 14
-rw-r--r--  src/mongo/db/repl/dbcheck.cpp                    1
-rw-r--r--  src/mongo/db/repl/dbcheck.h                      1
-rw-r--r--  src/mongo/db/repl/oplog.cpp                     20
-rw-r--r--  src/mongo/db/repl/oplog.h                        8
-rw-r--r--  src/mongo/db/repl/oplog_entry.cpp                7
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp                  4
-rw-r--r--  src/mongo/db/repl/sync_tail_test.cpp            16
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp   44
10 files changed, 77 insertions, 49 deletions
diff --git a/jstests/core/apply_ops1.js b/jstests/core/apply_ops1.js
index 0b098bed8a0..0dc0586e43e 100644
--- a/jstests/core/apply_ops1.js
+++ b/jstests/core/apply_ops1.js
@@ -365,7 +365,10 @@
assert.eq(true, res.results[1], "Valid update failed");
// Ops with transaction numbers are valid.
- var lsid = {id: UUID()};
+ const lsid = {
+ "id": UUID("3eea4a58-6018-40b6-8743-6a55783bf902"),
+ "uid": BinData(0, "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=")
+ };
res = db.runCommand({
applyOps: [
{
@@ -374,7 +377,7 @@
o: {_id: 7, x: 24},
lsid: lsid,
txnNumber: NumberLong(1),
- stmdId: 0
+ stmtId: NumberInt(0)
},
{
op: "u",
@@ -383,7 +386,7 @@
o: {$set: {x: 25}},
lsid: lsid,
txnNumber: NumberLong(1),
- stmdId: 1
+ stmtId: NumberInt(1)
},
{
op: "d",
@@ -391,7 +394,7 @@
o: {_id: 7},
lsid: lsid,
txnNumber: NumberLong(2),
- stmdId: 0
+ stmtId: NumberInt(0)
},
]
});
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 4b18943e2b6..41e1dc4cdfa 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -202,10 +202,20 @@ Status _applyOps(OperationContext* opCtx,
"applyOps",
nss.ns(),
[opCtx, nss, opObj, opType, alwaysUpsert, oplogApplicationMode] {
+ BSONObjBuilder builder;
+ builder.appendElements(opObj);
+ if (!builder.hasField(OplogEntry::kTimestampFieldName)) {
+ builder.append(OplogEntry::kTimestampFieldName, Timestamp());
+ }
+ if (!builder.hasField(OplogEntry::kHashFieldName)) {
+ builder.append(OplogEntry::kHashFieldName, 0LL);
+ }
+ auto entryObj = builder.done();
+ auto entry = uassertStatusOK(OplogEntry::parse(entryObj));
if (*opType == 'c') {
invariant(opCtx->lockState()->isW());
- uassertStatusOK(
- repl::applyCommand_inlock(opCtx, opObj, oplogApplicationMode));
+ uassertStatusOK(repl::applyCommand_inlock(
+ opCtx, opObj, entry, oplogApplicationMode));
return Status::OK();
}
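
(Illustrative sketch, not part of the commit: the hunk above pads an applyOps operation document with placeholder "ts" and "h" fields so that OplogEntry::parse() accepts it, since applyOps op documents normally omit them. Expressed as a standalone helper, assuming only the BSONObjBuilder and OplogEntry APIs already used in this diff and a hypothetical function name, the pattern is:

// Hypothetical helper mirroring the inline padding in _applyOps above.
BSONObj padOpForParsing(const BSONObj& opObj) {
    BSONObjBuilder builder;
    builder.appendElements(opObj);
    if (!builder.hasField(OplogEntry::kTimestampFieldName)) {
        builder.append(OplogEntry::kTimestampFieldName, Timestamp());  // placeholder timestamp
    }
    if (!builder.hasField(OplogEntry::kHashFieldName)) {
        builder.append(OplogEntry::kHashFieldName, 0LL);  // placeholder hash
    }
    return builder.obj();
}
)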
diff --git a/src/mongo/db/repl/dbcheck.cpp b/src/mongo/db/repl/dbcheck.cpp
index 5edd664a28d..8cc837a7096 100644
--- a/src/mongo/db/repl/dbcheck.cpp
+++ b/src/mongo/db/repl/dbcheck.cpp
@@ -504,6 +504,7 @@ Status dbCheckOplogCommand(OperationContext* opCtx,
const BSONElement& ui,
BSONObj& cmd,
const repl::OpTime& optime,
+ const repl::OplogEntry& entry,
OplogApplication::Mode mode) {
auto type = OplogEntries_parse(IDLParserErrorContext("type"), cmd.getStringField("type"));
IDLParserErrorContext ctx("o");
diff --git a/src/mongo/db/repl/dbcheck.h b/src/mongo/db/repl/dbcheck.h
index e7fc1921a4e..97891321a6d 100644
--- a/src/mongo/db/repl/dbcheck.h
+++ b/src/mongo/db/repl/dbcheck.h
@@ -217,6 +217,7 @@ Status dbCheckOplogCommand(OperationContext* opCtx,
const BSONElement& ui,
BSONObj& cmd,
const repl::OpTime& optime,
+ const repl::OplogEntry& entry,
OplogApplication::Mode mode);
}
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 13cdc72d0d3..34f6ace269d 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -790,6 +790,7 @@ using OpApplyFn = stdx::function<Status(OperationContext* opCtx,
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode)>;
struct ApplyOpMetadata {
@@ -813,6 +814,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
const NamespaceString nss(parseNs(ns, cmd));
if (auto idIndexElem = cmd["idIndex"]) {
@@ -839,6 +841,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
const NamespaceString nss(parseUUIDorNs(opCtx, ns, ui, cmd));
BSONElement first = cmd.firstElement();
@@ -861,6 +864,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
NamespaceString nss;
std::tie(std::ignore, nss) = parseCollModUUIDAndNss(opCtx, ui, ns, cmd);
@@ -876,6 +880,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
return dropDatabase(opCtx, NamespaceString(ns).db().toString());
},
@@ -886,6 +891,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
BSONObjBuilder resultWeDontCareAbout;
auto nss = parseUUIDorNs(opCtx, ns, ui, cmd);
@@ -910,6 +916,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
BSONObjBuilder resultWeDontCareAbout;
return dropIndexes(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd, &resultWeDontCareAbout);
@@ -921,6 +928,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
BSONObjBuilder resultWeDontCareAbout;
return dropIndexes(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd, &resultWeDontCareAbout);
@@ -932,6 +940,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
BSONObjBuilder resultWeDontCareAbout;
return dropIndexes(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd, &resultWeDontCareAbout);
@@ -943,6 +952,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
BSONObjBuilder resultWeDontCareAbout;
return dropIndexes(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd, &resultWeDontCareAbout);
@@ -954,6 +964,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
return renameCollectionForApplyOps(opCtx, nsToDatabase(ns), ui, cmd, opTime);
},
@@ -964,6 +975,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
BSONObjBuilder resultWeDontCareAbout;
return applyOps(opCtx, nsToDatabase(ns), cmd, mode, opTime, &resultWeDontCareAbout);
@@ -974,6 +986,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
return convertToCapped(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd["size"].number());
},
@@ -984,6 +997,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
return emptyCapped(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd));
},
@@ -994,6 +1008,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status { return Status::OK(); }}},
{"abortTransaction",
{[](OperationContext* opCtx,
@@ -1001,6 +1016,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
const BSONElement& ui,
BSONObj& cmd,
const OpTime& opTime,
+ const OplogEntry& entry,
OplogApplication::Mode mode) -> Status {
// We don't put transactions into the prepare state until the end of recovery, so there is
// no transaction to abort.
@@ -1510,6 +1526,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
Status applyCommand_inlock(OperationContext* opCtx,
const BSONObj& op,
+ const OplogEntry& entry,
OplogApplication::Mode mode) {
LOG(3) << "applying command op: " << redact(op)
<< ", oplog application mode: " << OplogApplication::modeToString(mode);
@@ -1629,7 +1646,8 @@ Status applyCommand_inlock(OperationContext* opCtx,
// If 'writeTime' is not null, any writes in this scope will be given 'writeTime' as
// their timestamp at commit.
TimestampBlock tsBlock(opCtx, writeTime);
- status = curOpToApply.applyFunc(opCtx, nss.ns().c_str(), fieldUI, o, opTime, mode);
+ status =
+ curOpToApply.applyFunc(opCtx, nss.ns().c_str(), fieldUI, o, opTime, entry, mode);
} catch (...) {
status = exceptionToStatus();
}
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index f64a0d0dcb6..9ed57bd410e 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -37,6 +37,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/logical_session_id.h"
+#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/stdx/functional.h"
@@ -222,10 +223,13 @@ Status applyOperation_inlock(OperationContext* opCtx,
/**
* Take a command op and apply it locally
- * Used for applying from an oplog
+ * Used for applying from an oplog and for applyOps command.
* Returns failure status if the op that could not be applied.
*/
-Status applyCommand_inlock(OperationContext* opCtx, const BSONObj& op, OplogApplication::Mode mode);
+Status applyCommand_inlock(OperationContext* opCtx,
+ const BSONObj& op,
+ const OplogEntry& entry,
+ OplogApplication::Mode mode);
/**
* Initializes the global Timestamp with the value from the timestamp of the last oplog entry.
diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp
index 77000a33949..071a05b7a5a 100644
--- a/src/mongo/db/repl/oplog_entry.cpp
+++ b/src/mongo/db/repl/oplog_entry.cpp
@@ -71,9 +71,10 @@ OplogEntry::CommandType parseCommandType(const BSONObj& objectField) {
} else if (commandString == "abortTransaction") {
return OplogEntry::CommandType::kAbortTransaction;
} else {
- severe() << "Unknown oplog entry command type: " << commandString
- << " Object field: " << redact(objectField);
- fassertFailedNoTrace(40444);
+ uasserted(ErrorCodes::BadValue,
+ str::stream() << "Unknown oplog entry command type: " << commandString
+ << " Object field: "
+ << redact(objectField));
}
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 9a2f06d1039..017d66adf92 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -308,13 +308,15 @@ Status SyncTail::syncApply(OperationContext* opCtx,
boost::optional<Lock::GlobalWrite> globalWriteLock;
// TODO SERVER-37180 Remove this double-parsing.
+ // The command entry has been parsed before, so it must be valid.
+ auto entry = uassertStatusOK(OplogEntry::parse(op));
const StringData commandName(op["o"].embeddedObject().firstElementFieldName());
if (!op.getBoolField("prepare") && commandName != "abortTransaction") {
globalWriteLock.emplace(opCtx);
}
// special case apply for commands to avoid implicit database creation
- Status status = applyCommand_inlock(opCtx, op, oplogApplicationMode);
+ Status status = applyCommand_inlock(opCtx, op, entry, oplogApplicationMode);
incrementOpsAppliedStats();
return status;
});
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 91944c0ebe7..d51babb0075 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -351,7 +351,11 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
<< "ns"
<< nss.getCommandNS().ns()
<< "o"
- << BSON("create" << nss.coll()));
+ << BSON("create" << nss.coll())
+ << "ts"
+ << Timestamp(1, 1)
+ << "h"
+ << 0LL);
bool applyCmdCalled = false;
_opObserver->onCreateCollectionFn = [&](OperationContext* opCtx,
Collection*,
@@ -379,10 +383,14 @@ TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
<< 12345
<< "o"
<< BSON("create"
- << "t"));
- // This test relies on the namespace type check in applyCommand_inlock().
+ << "t")
+ << "ts"
+ << Timestamp(1, 1)
+ << "h"
+ << 0LL);
+ // This test relies on the namespace type check of IDL.
ASSERT_THROWS(SyncTail::syncApply(_opCtx.get(), op, OplogApplication::Mode::kInitialSync),
- ExceptionFor<ErrorCodes::InvalidNamespace>);
+ ExceptionFor<ErrorCodes::TypeMismatch>);
}
DEATH_TEST_F(SyncTailTest, MultiApplyAbortsWhenNoOperationsAreGiven, "!ops.empty()") {
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index f7abe822a24..d5e7cc6ae06 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -639,7 +639,7 @@ public:
<< BSON("_id" << idx))
<< BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL
<< "h"
- << 1
+ << 1LL
<< "op"
<< "c"
<< "ns"
@@ -665,7 +665,7 @@ public:
class SecondaryArrayInsertTimes : public StorageTimestampTest {
public:
void run() {
- // In order for applyOps to assign timestamps, we must be in non-replicated mode.
+ // In order for oplog application to assign timestamps, we must be in non-replicated mode.
repl::UnreplicatedWritesBlock uwb(_opCtx);
// Create a new collection.
@@ -676,57 +676,37 @@ public:
const std::uint32_t docsToInsert = 10;
const LogicalTime firstInsertTime = _clock->reserveTicks(docsToInsert);
- BSONObjBuilder fullCommand;
- BSONArrayBuilder applyOpsB(fullCommand.subarrayStart("applyOps"));
- BSONObjBuilder applyOpsElem1Builder;
+ BSONObjBuilder oplogEntryBuilder;
// Populate the "ts" field with an array of all the grouped inserts' timestamps.
- BSONArrayBuilder tsArrayBuilder(applyOpsElem1Builder.subarrayStart("ts"));
+ BSONArrayBuilder tsArrayBuilder(oplogEntryBuilder.subarrayStart("ts"));
for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) {
tsArrayBuilder.append(firstInsertTime.addTicks(idx).asTimestamp());
}
tsArrayBuilder.done();
// Populate the "t" (term) field with an array of all the grouped inserts' terms.
- BSONArrayBuilder tArrayBuilder(applyOpsElem1Builder.subarrayStart("t"));
+ BSONArrayBuilder tArrayBuilder(oplogEntryBuilder.subarrayStart("t"));
for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) {
tArrayBuilder.append(1LL);
}
tArrayBuilder.done();
// Populate the "o" field with an array of all the grouped inserts.
- BSONArrayBuilder oArrayBuilder(applyOpsElem1Builder.subarrayStart("o"));
+ BSONArrayBuilder oArrayBuilder(oplogEntryBuilder.subarrayStart("o"));
for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) {
oArrayBuilder.append(BSON("_id" << idx));
}
oArrayBuilder.done();
- applyOpsElem1Builder << "h" << 0xBEEFBEEFLL << "v" << 2 << "op"
- << "i"
- << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid().get();
-
- applyOpsB.append(applyOpsElem1Builder.done());
-
- BSONObjBuilder applyOpsElem2Builder;
- applyOpsElem2Builder << "ts" << firstInsertTime.addTicks(docsToInsert).asTimestamp() << "t"
- << 1LL << "h" << 1 << "op"
- << "c"
- << "ns"
- << "test.$cmd"
- << "o" << BSON("applyOps" << BSONArrayBuilder().obj());
-
- applyOpsB.append(applyOpsElem2Builder.done());
- applyOpsB.done();
- // Apply the group of inserts.
- BSONObjBuilder result;
- ASSERT_OK(applyOps(_opCtx,
- nss.db().toString(),
- fullCommand.done(),
- repl::OplogApplication::Mode::kApplyOpsCmd,
- {},
- &result));
+ oplogEntryBuilder << "h" << 0xBEEFBEEFLL << "v" << 2 << "op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid().get();
+ auto oplogEntry = oplogEntryBuilder.done();
+ ASSERT_OK(repl::SyncTail::syncApply(
+ _opCtx, oplogEntry, repl::OplogApplication::Mode::kSecondary));
for (std::uint32_t idx = 0; idx < docsToInsert; ++idx) {
OneOffRead oor(_opCtx, firstInsertTime.addTicks(idx).asTimestamp());