summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/rename4.js11
-rw-r--r--jstests/repl/basic1.js3
-rw-r--r--jstests/sharding/update_immutable_fields.js98
-rw-r--r--jstests/update_dbref.js35
-rw-r--r--jstests/update_replace.js45
-rw-r--r--jstests/update_setOnInsert.js18
-rw-r--r--jstests/upsert1.js1
-rw-r--r--jstests/upsert3.js51
-rw-r--r--src/mongo/SConscript1
-rw-r--r--src/mongo/base/error_codes.err4
-rw-r--r--src/mongo/bson/bsonobj.h24
-rw-r--r--src/mongo/bson/mutable/document.cpp6
-rw-r--r--src/mongo/bson/mutable/document.h3
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.cpp4
-rw-r--r--src/mongo/db/auth/role_graph_update.cpp5
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp6
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.cpp4
-rw-r--r--src/mongo/db/dbhelpers.cpp5
-rw-r--r--src/mongo/db/field_ref_set.cpp29
-rw-r--r--src/mongo/db/field_ref_set.h19
-rw-r--r--src/mongo/db/instance.cpp16
-rw-r--r--src/mongo/db/jsobj.cpp12
-rw-r--r--src/mongo/db/ops/SConscript1
-rw-r--r--src/mongo/db/ops/field_checker.cpp85
-rw-r--r--src/mongo/db/ops/field_checker.h8
-rw-r--r--src/mongo/db/ops/field_checker_test.cpp179
-rw-r--r--src/mongo/db/ops/modifier_object_replace.cpp22
-rw-r--r--src/mongo/db/ops/modifier_object_replace_test.cpp80
-rw-r--r--src/mongo/db/ops/modifier_push.cpp22
-rw-r--r--src/mongo/db/ops/modifier_push_test.cpp24
-rw-r--r--src/mongo/db/ops/modifier_rename.cpp60
-rw-r--r--src/mongo/db/ops/modifier_rename_test.cpp58
-rw-r--r--src/mongo/db/ops/modifier_set.cpp18
-rw-r--r--src/mongo/db/ops/modifier_set_test.cpp7
-rw-r--r--src/mongo/db/ops/modifier_unset.cpp2
-rw-r--r--src/mongo/db/ops/update.cpp475
-rw-r--r--src/mongo/db/ops/update_driver.cpp331
-rw-r--r--src/mongo/db/ops/update_driver.h58
-rw-r--r--src/mongo/db/ops/update_driver_test.cpp195
-rw-r--r--src/mongo/db/ops/update_lifecycle.h64
-rw-r--r--src/mongo/db/ops/update_lifecycle_impl.cpp85
-rw-r--r--src/mongo/db/ops/update_lifecycle_impl.h60
-rw-r--r--src/mongo/db/ops/update_request.h29
-rw-r--r--src/mongo/db/repl/oplog.cpp7
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp3
-rw-r--r--src/mongo/s/collection_metadata.cpp19
-rw-r--r--src/mongo/s/collection_metadata.h13
-rw-r--r--src/mongo/s/metadata_loader.cpp2
-rw-r--r--src/mongo/shell/assert.js38
49 files changed, 1284 insertions, 1061 deletions
diff --git a/jstests/rename4.js b/jstests/rename4.js
index 989fff510fc..bca6c6928a6 100644
--- a/jstests/rename4.js
+++ b/jstests/rename4.js
@@ -27,12 +27,6 @@ function bad( f ) {
bad( "t.update( {}, {$rename:{'a':'a'}} )" );
bad( "t.update( {}, {$rename:{'':'a'}} )" );
bad( "t.update( {}, {$rename:{'a':''}} )" );
-bad( "t.update( {}, {$rename:{'_id':'a'}} )" );
-bad( "t.update( {}, {$rename:{'a':'_id'}} )" );
-bad( "t.update( {}, {$rename:{'_id.a':'b'}} )" );
-bad( "t.update( {}, {$rename:{'b':'_id.a'}} )" );
-bad( "t.update( {}, {$rename:{'_id.a':'_id.b'}} )" );
-bad( "t.update( {}, {$rename:{'_id.b':'_id.a'}} )" );
bad( "t.update( {}, {$rename:{'.a':'b'}} )" );
bad( "t.update( {}, {$rename:{'a':'.b'}} )" );
bad( "t.update( {}, {$rename:{'a.':'b'}} )" );
@@ -42,7 +36,8 @@ bad( "t.update( {}, {$rename:{'a.$':'b'}} )" );
bad( "t.update( {}, {$rename:{'a':'b.$'}} )" );
// Only bad if input doc has field resulting in conflict
-t.save( {_id:1, a:1} );
+t.save( {_id:1, a:2} );
+bad( "t.update( {}, {$rename:{'_id':'a'}} )" );
bad( "t.update( {}, {$set:{b:1},$rename:{'a':'b'}} )" );
bad( "t.update( {}, {$rename:{'a':'b'},$set:{b:1}} )" );
bad( "t.update( {}, {$rename:{'a':'b'},$set:{a:1}} )" );
@@ -53,7 +48,7 @@ bad( "t.update( {}, {$rename:{'a':'b.c'},$set:{b:1}} )" );
t.remove({});
-t.save( {a:[1],b:{c:[1]},d:[{e:1}],f:1} );
+t.save( {a:[1],b:{c:[2]},d:[{e:3}],f:4} );
bad( "t.update( {}, {$rename:{'a.0':'f'}} )" );
bad( "t.update( {}, {$rename:{'a.0':'g'}} )" );
bad( "t.update( {}, {$rename:{'f':'a.0'}} )" );
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index 4a6091d9755..ccde8874fbd 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -24,6 +24,9 @@ function check( note ){
return;
sleep( 200 );
}
+ lastOpLogEntry = m.getDB("local").oplog.$main.find({op:{$ne:"n"}}).sort({$natural:-1}).limit(-1).next();
+ note = note + tojson(am.a.find().toArray()) + " != " + tojson(as.a.find().toArray())
+ + "last oplog:" + tojson(lastOpLogEntry);
assert.eq( x.md5 , y.md5 , note );
}
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
new file mode 100644
index 00000000000..71497466f33
--- /dev/null
+++ b/jstests/sharding/update_immutable_fields.js
@@ -0,0 +1,98 @@
+// Tests that updates can't change immutable fields (used in sharded system)
+var st = new ShardingTest({shards : 2,
+ mongos : 1,
+ verbose : 0,
+ other : {separateConfig : 1}})
+st.stopBalancer();
+
+var mongos = st.s;
+var config = mongos.getDB("config");
+var coll = mongos.getCollection(jsTestName() + ".coll1");
+var shard0 = st.shard0;
+
+printjson(config.adminCommand({enableSharding : coll.getDB() + ""}))
+printjson(config.adminCommand({shardCollection : "" + coll, key : {a : 1}}))
+
+var getDirectShardedConn = function( st, collName ) {
+
+ var shardConnWithVersion = new Mongo( st.shard0.host );
+
+ var mockServerId = new ObjectId();
+ var configConnStr = st._configDB;
+
+ var maxChunk = st.s0.getCollection( "config.chunks" )
+ .find({ ns : collName }).sort({ lastmod : -1 }).next();
+
+ var ssvInitCmd = { setShardVersion : collName,
+ configdb : configConnStr,
+ serverID : mockServerId,
+ version : maxChunk.lastmod,
+ versionEpoch : maxChunk.lastmodEpoch };
+
+ printjson( ssvInitCmd );
+
+ assert.commandWorked( shardConnWithVersion.getDB( "admin" ).runCommand( ssvInitCmd ) );
+
+ return shardConnWithVersion;
+}
+
+var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
+
+// No shard key
+shard0Coll.remove()
+shard0Coll.save({_id:3})
+assert.gleError(shard0Coll.getDB(), function(gle) {
+ return "save without shard key passed - " + tojson(gle) + " doc: " + tojson(shard0Coll.findOne())
+});
+
+// Full shard key in save
+shard0Coll.save({_id: 1, a: 1})
+assert.gleSuccess(shard0Coll.getDB(), "save with shard key failed");
+
+// Full shard key on replacement (basically the same as above)
+shard0Coll.remove()
+shard0Coll.update({_id: 1}, {a:1}, true)
+assert.gleSuccess(shard0Coll.getDB(), "update + upsert (replacement) with shard key failed");
+
+// Full shard key after $set
+shard0Coll.remove()
+shard0Coll.update({_id: 1}, {$set: {a: 1}}, true)
+assert.gleSuccess(shard0Coll.getDB(), "update + upsert ($set) with shard key failed");
+
+// Update existing doc (replacement), same shard key value
+shard0Coll.update({_id: 1}, {a:1})
+assert.gleSuccess(shard0Coll.getDB(), "update (replacement) with shard key failed");
+
+//Update existing doc ($set), same shard key value
+shard0Coll.update({_id: 1}, {$set: {a: 1}})
+assert.gleSuccess(shard0Coll.getDB(), "update ($set) with shard key failed");
+
+// Error due to mutating the shard key (replacement)
+shard0Coll.update({_id: 1}, {b:1})
+assert.gleError(shard0Coll.getDB(), "update (replacement) removes shard key");
+
+// Error due to mutating the shard key ($set)
+shard0Coll.update({_id: 1}, {$unset: {a: 1}})
+assert.gleError(shard0Coll.getDB(), "update ($unset) removes shard key");
+
+// Error due to removing all the embedded fields.
+shard0Coll.remove()
+
+shard0Coll.save({_id: 2, a:{c:1, b:1}})
+assert.gleSuccess(shard0Coll.getDB(), "save with shard key failed -- 1");
+
+shard0Coll.update({}, {$unset: {"a.c": 1}})
+assert.gleError(shard0Coll.getDB(), function(gle) {
+ return "unsetting part of shard key passed - " + tojson(gle) +
+ " doc: " + tojson(shard0Coll.findOne())
+});
+
+shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}})
+assert.gleError(shard0Coll.getDB(), function(gle) {
+ return "unsetting nested fields of shard key passed - " + tojson(gle) +
+ " doc: " + tojson(shard0Coll.findOne())
+});
+
+db.adminCommand("unsetSharding");
+jsTest.log("DONE!"); // distinguishes shutdown failures
+st.stop();
diff --git a/jstests/update_dbref.js b/jstests/update_dbref.js
new file mode 100644
index 00000000000..d435b8c3416
--- /dev/null
+++ b/jstests/update_dbref.js
@@ -0,0 +1,35 @@
+// Test that we can update DBRefs, but not dbref fields outside a DBRef
+
+t = db.jstests_update_dbref;
+t.drop();
+
+t.save({_id:1, a: new DBRef("a", "b")});
+assert.docEq({_id:1, a: new DBRef("a", "b")}, t.findOne());
+
+t.update({}, {$set: {"a.$id": 2}});
+assert.gleSuccess(db, "a.$id update");
+assert.docEq({_id:1, a: new DBRef("a", 2)}, t.findOne());
+
+t.update({}, {$set: {"a.$ref": "b"}});
+assert.gleSuccess(db, "a.$ref update");
+
+assert.docEq({_id:1, a: new DBRef("b", 2)}, t.findOne());
+
+// Bad updates
+t.update({}, {$set: {"$id": 3}});
+assert.gleErrorRegex(db, /\$id/, "expected bad update because of $id")
+assert.docEq({_id:1, a: new DBRef("b", 2)}, t.findOne());
+
+t.update({}, {$set: {"$ref": "foo"}});
+assert.gleErrorRegex(db, /\$ref/, "expected bad update because of $ref")
+assert.docEq({_id:1, a: new DBRef("b", 2)}, t.findOne());
+
+t.update({}, {$set: {"$db": "aDB"}});
+assert.gleErrorRegex(db, /\$db/, "expected bad update because of $db")
+assert.docEq({_id:1, a: new DBRef("b", 2)}, t.findOne());
+
+t.update({}, {$set: {"b.$id": 2}});
+assert.gleError(db, function() { return "b.$id update -- doc:" + tojson(t.findOne())});
+
+t.update({}, {$set: {"b.$ref": 2}});
+assert.gleError(db, function() { return "b.$ref update -- doc:" + tojson(t.findOne())});
diff --git a/jstests/update_replace.js b/jstests/update_replace.js
new file mode 100644
index 00000000000..1bee0a8597a
--- /dev/null
+++ b/jstests/update_replace.js
@@ -0,0 +1,45 @@
+// This test checks validation of the replaced doc (on the server) for dots, $prefix and _id
+
+t = db.jstests_update_replace;
+t.drop();
+
+// disable client side checks so we can test the server
+DBCollection.prototype._validateForStorage = function() {};
+
+// Should not allow "." in field names
+t.save({_id:1, "a.a":1})
+assert.gleError(db, "a.a");
+
+// Should not allow "." in field names, embedded
+t.save({_id:1, a :{"a.a":1}})
+assert.gleError(db, "a: a.a");
+
+// Should not allow "$"-prefixed field names, caught before "." check
+t.save({_id:1, $a :{"a.a":1}})
+assert.gleError(db, "$a: a.a");
+
+// Should not allow "$"-prefixed field names
+t.save({_id:1, $a: 1})
+assert.gleError(db, "$a");
+
+// _id validation checks
+
+// Should not allow regex _id
+t.save({_id: /a/})
+assert.gleError(db, "_id regex");
+
+// Should not allow regex _id, even if not first
+t.save({a:2, _id: /a/})
+assert.gleError(db, "a _id regex");
+
+// Should not allow array _id
+t.save({_id: [9]})
+assert.gleError(db, "_id array");
+
+// This is fine since _id isn't a top level field
+t.save({a :{ _id: [9]}})
+assert.gleSuccess(db, "embedded _id array");
+
+// This is fine since _id isn't a top level field
+t.save({b:1, a :{ _id: [9]}})
+assert.gleSuccess(db, "b embedded _id array"); \ No newline at end of file
diff --git a/jstests/update_setOnInsert.js b/jstests/update_setOnInsert.js
index ddc699c70ed..be215ab408d 100644
--- a/jstests/update_setOnInsert.js
+++ b/jstests/update_setOnInsert.js
@@ -1,4 +1,4 @@
-
+// This tests that $setOnInsert works and allow setting the _id
t = db.update_setOnInsert;
db.setProfilingLevel( 2 );
@@ -29,3 +29,19 @@ function dotest( useIndex ) {
dotest( false );
dotest( true );
+
+
+// Cases for SERVER-9958 -- Allow _id $setOnInsert during insert (if upsert:true, and not doc found)
+t.drop();
+
+t.update( {_id: 1} , { $setOnInsert: { "_id.a": new Date() } } , true );
+assert.gleError(db, function(gle) {
+ return "$setOnInsert _id.a - " + tojson(gle) + tojson(t.findOne()) } );
+
+t.update( {"_id.a": 4} , { $setOnInsert: { "_id.b": 1 } } , true );
+assert.gleError(db, function(gle) {
+ return "$setOnInsert _id.b - " + tojson(gle) + tojson(t.findOne()) } );
+
+t.update( {"_id.a": 4} , { $setOnInsert: { "_id": {a:4, b:1} } } , true );
+assert.gleError(db, function(gle) {
+ return "$setOnInsert _id 3 - " + tojson(gle) + tojson(t.findOne()) } );
diff --git a/jstests/upsert1.js b/jstests/upsert1.js
index 15777cbbc24..3c208520b5f 100644
--- a/jstests/upsert1.js
+++ b/jstests/upsert1.js
@@ -51,6 +51,7 @@ db.createCollection("no_id", {autoIndexId:false})
db.no_id.update({foo:1}, {$set:{a:1}}, true)
l = db.getLastErrorCmd();
assert.eq( l.upserted , undefined , "H1 - " + tojson(l) );
+assert( !l.err, "H1.5 No error expected - " + tojson(l) )
assert.eq( 0, db.no_id.getIndexes().length, "H2" );
assert.eq( 1, db.no_id.count(), "H3" );
assert.eq( { foo : 1, a : 1 }, db.no_id.findOne(), "H4" );
diff --git a/jstests/upsert3.js b/jstests/upsert3.js
new file mode 100644
index 00000000000..5faefd05f4b
--- /dev/null
+++ b/jstests/upsert3.js
@@ -0,0 +1,51 @@
+// tests to make sure no dup fields are created when using query to do upsert
+t = db.upsert1;
+t.drop();
+
+// make sure the new _id is not duplicated
+t.update( {"a.b": 1, a: {a: 1, b: 1}} , {$inc: {y: 1}} , true );
+assert.gleError(db, function(gle) {
+ return "a.b-1 - " + tojson(gle) + " doc:" + tojson(t.findOne()) });
+
+t.update( {"_id.a": 1, _id: {a: 1, b: 1}} , {$inc : {y: 1}} , true );
+assert.gleError(db, function(gle) {
+ return "_id-1 - " + tojson(gle) + " doc:" + tojson(t.findOne()) });
+
+t.update( {_id: {a: 1, b: 1}, "_id.a": 1} , { $inc: {y: 1}} , true );
+assert.gleError(db, function(gle) {
+ return "_id-2 - " + tojson(gle) + " doc:" + tojson(t.findOne()) });
+
+// Should be redundant, but including from SERVER-11363
+t.update( {_id: {a: 1, b: 1}, "_id.a": 1} , {$setOnInsert: {y: 1}} , true );
+assert.gleError(db, function(gle) {
+ return "_id-3 - " + tojson(gle) + " doc:" + tojson(t.findOne()) });
+
+//Should be redundant, but including from SERVER-11514
+t.update( {"a": {}, "a.c": 2} , {$set: {x: 1}}, true );
+assert.gleError(db, function(gle) {
+ return "a.c-1 - " + tojson(gle) + " doc:" + tojson(t.findOne()) });
+
+// Should be redundant, but including from SERVER-4830
+t.update( {'a': {b: 1}, 'a.c': 1}, {$inc: {z: 1}}, true );
+assert.gleError(db, function(gle) {
+ return "a-1 - " + tojson(gle) + " doc:" + tojson(t.findOne()) });
+
+// Should be redundant, but including from SERVER-4830
+t.update( {a: 1, "a.b": 1, a: [1, {b: 1}]}, {$inc: {z: 1}}, true );
+assert.gleError(db, function(gle) {
+ return "a-2 - " + tojson(gle) + " doc:" + tojson(t.findOne()) });
+
+// Replacement tests
+// Query is ignored for replacements, except _id field.
+t.update( {r: {a: 1, b: 1}, "r.a": 1} , {y: 1} , true );
+assert.gleSuccess(db, "r-1");
+assert(t.findOne().y, 1, "inserted doc missing field")
+var docMinusId = t.findOne();
+delete docMinusId._id
+assert.docEq({y: 1}, docMinusId, "r-1")
+t.drop()
+
+t.update( {_id: {a:1, b:1}, "_id.a": 1} , {y: 1} , true );
+assert.gleSuccess(db, "_id-4");
+assert.docEq({_id: {a: 1, b: 1}, y: 1}, t.findOne(), "_id-4")
+t.drop() \ No newline at end of file
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index cc9afe0284d..1abe0496572 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -620,6 +620,7 @@ serverOnlyFiles = [ "db/curop.cpp",
"db/write_concern.cpp",
"db/startup_warnings.cpp",
"db/storage_options.cpp",
+ "db/ops/update_lifecycle_impl.cpp",
# most commands are only for mongod
"db/commands/apply_ops.cpp",
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err
index a6a4168197a..9e8bc693b0d 100644
--- a/src/mongo/base/error_codes.err
+++ b/src/mongo/base/error_codes.err
@@ -53,7 +53,7 @@ error_code("ExceededTimeLimit", 50)
error_code("ManualInterventionRequired", 51)
error_code("DollarPrefixedFieldName", 52)
error_code("InvalidIdField", 53)
-error_code("ImmutableIdField", 54)
+error_code("NotSingleValueField", 54)
error_code("InvalidDBRef", 55)
error_code("EmptyFieldName", 56)
error_code("DottedFieldName", 57)
@@ -65,7 +65,7 @@ error_code("OplogOperationUnsupported", 62)
error_code("StaleShardVersion", 63)
error_code("WriteConcernFailed", 64)
error_code("MultipleErrorsOccurred", 65)
-error_code("ImmutableShardKeyField", 66)
+error_code("ImmutableField", 66)
error_code("CannotCreateIndex", 67 )
error_code("IndexAlreadyExists", 68 )
error_code("AuthSchemaIncompatible", 69)
diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h
index 3a949ec6c13..ea8ac971a5d 100644
--- a/src/mongo/bson/bsonobj.h
+++ b/src/mongo/bson/bsonobj.h
@@ -280,7 +280,7 @@ namespace mongo {
* -- unless it is a dbref ($ref/$id/[$db]/...)
*/
inline bool okForStorage() const {
- return _okForStorage(false).isOK();
+ return _okForStorage(false, true).isOK();
}
/** Same as above with the following extra restrictions
@@ -290,27 +290,31 @@ namespace mongo {
* -- Array
*/
inline bool okForStorageAsRoot() const {
- return _okForStorage(true).isOK();
+ return _okForStorage(true, true).isOK();
}
/**
* Validates that this can be stored as an embedded document
* See details above in okForStorage
*
+ * If 'deep' is true then validation is done to children
+ *
* If not valid a user readable status message is returned.
*/
- inline Status storageValidEmbedded() const {
- return _okForStorage(false);
+ inline Status storageValidEmbedded(const bool deep = true) const {
+ return _okForStorage(false, deep);
}
/**
* Validates that this can be stored as a document (in a collection)
* See details above in okForStorageAsRoot
*
+ * If 'deep' is true then validation is done to children
+ *
* If not valid a user readable status message is returned.
*/
- inline Status storageValid() const {
- return _okForStorage(true);
+ inline Status storageValid(const bool deep = true) const {
+ return _okForStorage(true, deep);
}
/** @return true if object is empty -- i.e., {} */
@@ -563,7 +567,13 @@ namespace mongo {
_assertInvalid();
}
- Status _okForStorage(bool root) const;
+ /**
+ * Validate if the element is okay to be stored in a collection, maybe as the root element
+ *
+ * If 'root' is true then checks against _id are made.
+ * If 'deep' is false then do not traverse through children
+ */
+ Status _okForStorage(bool root, bool deep) const;
};
std::ostream& operator<<( std::ostream &s, const BSONObj &o );
diff --git a/src/mongo/bson/mutable/document.cpp b/src/mongo/bson/mutable/document.cpp
index a77e4923a07..259a2f36330 100644
--- a/src/mongo/bson/mutable/document.cpp
+++ b/src/mongo/bson/mutable/document.cpp
@@ -2181,6 +2181,12 @@ namespace mutablebson {
return Element(this, impl.insertLeafElement(leafRef));
}
+ Element Document::makeElementNewOID(const StringData& fieldName) {
+ OID newOID;
+ newOID.init();
+ return makeElementOID(fieldName, newOID);
+ }
+
Element Document::makeElementOID(const StringData& fieldName, const OID value) {
Impl& impl = getImpl();
dassert(impl.doesNotAlias(fieldName));
diff --git a/src/mongo/bson/mutable/document.h b/src/mongo/bson/mutable/document.h
index 73c8bf3f705..b39fcf8b2ac 100644
--- a/src/mongo/bson/mutable/document.h
+++ b/src/mongo/bson/mutable/document.h
@@ -327,6 +327,9 @@ namespace mutablebson {
/** Create a new undefined Element with the given field name. */
Element makeElementUndefined(const StringData& fieldName);
+ /** Create a new OID + Element with the given field name. */
+ Element makeElementNewOID(const StringData& fieldName);
+
/** Create a new OID Element with the given value and field name. */
Element makeElementOID(const StringData& fieldName, mongo::OID value);
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 0c8735c09d4..c3c9eb0b296 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -251,11 +251,11 @@ namespace {
if (query.hasField("_id")) {
document.root().appendElement(query["_id"]);
}
- status = driver.createFromQuery(query, document);
+ status = driver.populateDocumentWithQueryFields(query, document);
if (!status.isOK()) {
return status;
}
- status = driver.update(StringData(), &document, NULL);
+ status = driver.update(StringData(), &document);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index 836d6504936..fc1b3801a9f 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -183,13 +183,12 @@ namespace {
status = AuthorizationManager::getBSONForRole(
roleGraph, roleToUpdate, roleDocument.root());
if (status == ErrorCodes::RoleNotFound) {
- roleDocument.root().appendElement(queryPattern["_id"]);
- status = driver.createFromQuery(queryPattern, roleDocument);
+ status = driver.populateDocumentWithQueryFields(queryPattern, roleDocument);
}
if (!status.isOK())
return status;
- status = driver.update(StringData(), &roleDocument, NULL);
+ status = driver.update(StringData(), &roleDocument);
if (!status.isOK())
return status;
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 164f5677b82..323bc661899 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/pagefault.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/update.h"
+#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/queryutil.h"
namespace mongo {
@@ -224,7 +225,10 @@ namespace mongo {
request.setUpdates(update);
request.setUpsert(upsert);
request.setUpdateOpLog();
-
+ // TODO(greg) We need to send if we are ignoring
+ // the shard version below, but for now no
+ UpdateLifecycleImpl updateLifecycle(false, requestNs);
+ request.setLifecycle(&updateLifecycle);
UpdateResult res = mongo::update(request, &cc().curop()->debug());
LOG(3) << "update result: " << res ;
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index b6c362da86d..0b4c7881963 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/lasterror.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/update.h"
+#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/pagefault.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/write_concern.h"
@@ -490,6 +491,9 @@ namespace mongo {
request.setUpsert( upsert );
request.setMulti( multi );
request.setUpdateOpLog();
+ // TODO(greg) We need to send if we are ignoring the shard version below, but for now yes
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
UpdateResult res = update( request, &opDebug );
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 2ca55e1eaaa..5fae6fc44f5 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/json.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/update.h"
+#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/ops/update_request.h"
#include "mongo/db/ops/update_result.h"
#include "mongo/db/pagefault.h"
@@ -216,6 +217,8 @@ namespace mongo {
request.setUpsert();
request.setUpdateOpLog();
request.setFromMigration(fromMigrate);
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
update(request, &debug);
}
@@ -230,6 +233,8 @@ namespace mongo {
request.setUpdates(obj);
request.setUpsert();
request.setUpdateOpLog();
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
update(request, &debug);
diff --git a/src/mongo/db/field_ref_set.cpp b/src/mongo/db/field_ref_set.cpp
index c1a3c79cff5..64c5e178b7a 100644
--- a/src/mongo/db/field_ref_set.cpp
+++ b/src/mongo/db/field_ref_set.cpp
@@ -29,9 +29,12 @@
#include "mongo/db/field_ref_set.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
+ namespace str = mongoutils::str;
+
namespace {
// For legacy purposes, we must handle empty fieldnames, which FieldRef clearly
@@ -78,6 +81,19 @@ namespace mongo {
}
}
+ void FieldRefSet::keepShortest(const FieldRef* toInsert) {
+ const FieldRef* conflict;
+ if ( !insert(toInsert, &conflict) && (toInsert->numParts() < (conflict->numParts()))) {
+ _fieldSet.erase(conflict);
+ keepShortest(toInsert);
+ }
+ }
+
+ void FieldRefSet::fillFrom(const std::vector<FieldRef*>& fields) {
+ dassert(_fieldSet.empty());
+ _fieldSet.insert(fields.begin(), fields.end());
+ }
+
bool FieldRefSet::insert(const FieldRef* toInsert, const FieldRef** conflict) {
// We can determine if two fields conflict by checking their common prefix.
@@ -118,4 +134,17 @@ namespace mongo {
return true;
}
+ const std::string FieldRefSet::toString() const {
+ str::stream res;
+ res << "Fields:[ ";
+ FieldRefSet::const_iterator where = _fieldSet.begin();
+ const FieldRefSet::const_iterator end = _fieldSet.end();
+ for( ; where != end; ++where ) {
+ const FieldRef& current = **where;
+ res << current.dottedField() << ",";
+ }
+ res << "]";
+ return res;
+ }
+
} // namespace mongo
diff --git a/src/mongo/db/field_ref_set.h b/src/mongo/db/field_ref_set.h
index c80e3714e69..71a48502739 100644
--- a/src/mongo/db/field_ref_set.h
+++ b/src/mongo/db/field_ref_set.h
@@ -31,6 +31,8 @@
#include <set>
#include "mongo/base/disallow_copying.h"
+#include "mongo/base/owned_pointer_vector.h"
+#include "mongo/base/status.h"
#include "mongo/db/field_ref.h"
namespace mongo {
@@ -70,7 +72,7 @@ namespace mongo {
/**
* Returns true if the field 'toInsert' can be added in the set without
- * conflicts. Otwerwise returns false and fill in '*conflict' with the field 'toInsert'
+ * conflicts. Otherwise returns false and fill in '*conflict' with the field 'toInsert'
* clashed with.
*
* There is no ownership transfer of 'toInsert'. The caller is responsible for
@@ -80,6 +82,16 @@ namespace mongo {
bool insert(const FieldRef* toInsert, const FieldRef** conflict);
/**
+ * Fills the set with the supplied FieldRef*s
+ */
+ void fillFrom(const std::vector<FieldRef*>& fields);
+
+ /**
+ * Replace any existing conflicting FieldRef with the shortest (closest to root) one
+ */
+ void keepShortest(const FieldRef* toInsert);
+
+ /**
* Find all inserted fields which conflict with the FieldRef 'toCheck' by the semantics
* of 'insert', and add those fields to the 'conflicts' set.
*/
@@ -89,6 +101,11 @@ namespace mongo {
_fieldSet.clear();
}
+ /**
+ * A debug/log-able string
+ */
+ const std::string toString() const;
+
private:
// A set of field_ref pointers, none of which is owned here.
FieldSet _fieldSet;
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index a716fbf2a20..ecbf3b9011c 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -65,6 +65,7 @@
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/query.h"
#include "mongo/db/ops/update.h"
+#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/ops/update_driver.h"
#include "mongo/db/pagefault.h"
#include "mongo/db/repl/is_master.h"
@@ -653,17 +654,6 @@ namespace mongo {
if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) )
return;
- // See if we have any sharding keys, and if we find that we do, inject them
- // into the driver. If we don't, the empty BSONObj will reset any shard key
- // state in the driver.
- BSONObj shardKeyPattern;
- if (shardingState.needCollectionMetadata( ns ) ) {
- const CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( ns );
- if ( metadata )
- shardKeyPattern = metadata->getKeyPattern();
- }
- driver.refreshShardKeyPattern( shardKeyPattern );
-
Client::Context ctx( ns );
const NamespaceString requestNs(ns);
@@ -674,7 +664,8 @@ namespace mongo {
request.setQuery(query);
request.setUpdates(toupdate);
request.setUpdateOpLog(); // TODO: This is wasteful if repl is not active.
-
+ UpdateLifecycleImpl updateLifecycle(broadcast, requestNs);
+ request.setLifecycle(&updateLifecycle);
UpdateResult res = update(request, &op.debug(), &driver);
// for getlasterror
@@ -1340,7 +1331,6 @@ namespace {
<< "*************";
}
-
}
}
else {
diff --git a/src/mongo/db/jsobj.cpp b/src/mongo/db/jsobj.cpp
index 3d75d4d47e8..be60c2aed8a 100644
--- a/src/mongo/db/jsobj.cpp
+++ b/src/mongo/db/jsobj.cpp
@@ -892,8 +892,10 @@ namespace mongo {
return b.obj();
}
- Status BSONObj::_okForStorage(bool root) const {
+ Status BSONObj::_okForStorage(bool root, bool deep) const {
BSONObjIterator i( *this );
+
+ // The first field is special in the case of a DBRef where the first field must be $ref
bool first = true;
while ( i.more() ) {
BSONElement e = i.next();
@@ -945,12 +947,12 @@ namespace mongo {
<< typeName(e.type()));
}
- if ( e.mayEncapsulate() ) {
+ if ( deep && e.mayEncapsulate() ) {
switch ( e.type() ) {
case Object:
case Array:
{
- Status s = e.embeddedObject()._okForStorage(false);
+ Status s = e.embeddedObject()._okForStorage(false, true);
// TODO: combine field names for better error messages
if ( ! s.isOK() )
return s;
@@ -958,7 +960,7 @@ namespace mongo {
break;
case CodeWScope:
{
- Status s = e.codeWScopeObject()._okForStorage(false);
+ Status s = e.codeWScopeObject()._okForStorage(false, true);
// TODO: combine field names for better error messages
if ( ! s.isOK() )
return s;
@@ -968,6 +970,8 @@ namespace mongo {
uassert( 12579, "unhandled cases in BSONObj okForStorage" , 0 );
}
}
+
+ // After we have processed one field, we are no longer on the first field
first = false;
}
return Status::OK();
diff --git a/src/mongo/db/ops/SConscript b/src/mongo/db/ops/SConscript
index cff70d3760e..547dc0e1c36 100644
--- a/src/mongo/db/ops/SConscript
+++ b/src/mongo/db/ops/SConscript
@@ -222,6 +222,7 @@ env.CppUnitTest(
target='update_driver_test',
source='update_driver_test.cpp',
LIBDEPS=[
+ '$BUILD_DIR/mongo/mutable_bson_test_utils',
'update_driver',
],
)
diff --git a/src/mongo/db/ops/field_checker.cpp b/src/mongo/db/ops/field_checker.cpp
index c7dd86f4e0c..26769e9642e 100644
--- a/src/mongo/db/ops/field_checker.cpp
+++ b/src/mongo/db/ops/field_checker.cpp
@@ -38,83 +38,26 @@ namespace mongo {
namespace fieldchecker {
- namespace {
-
- Status isUpdatable(const FieldRef& field, bool legacy) {
- const size_t numParts = field.numParts();
-
- if (numParts == 0) {
- return Status(ErrorCodes::EmptyFieldName,
- "An empty update path is not valid.");
- }
-
- for (size_t i = 0; i != numParts; ++i) {
- const StringData part = field.getPart(i);
-
- if ((i == 0) && part.compare("_id") == 0) {
- return Status(ErrorCodes::ImmutableIdField,
- mongoutils::str::stream() << "The update path '"
- << field.dottedField()
- << "' contains the '_id' field, which cannot be updated.");
- }
-
- if (part.empty()) {
- return Status(ErrorCodes::EmptyFieldName,
- mongoutils::str::stream() << "The update path '"
- << field.dottedField()
- << "' contains an empty field, which is not allowed.");
- }
-
- if (!legacy && (part[0] == '$')) {
-
- // A 'bare' dollar sign not in the first position is a positional
- // update token, so it is not an error.
- //
- // TODO: In 'isPositional' below, we redo a very similar walk and check.
- // Perhaps we should fuse these operations, and have isUpdatable take a
- // 'PositionalContext' object to be populated with information about any
- // discovered positional ops.
- const bool positional = ((i != 0) && (part.size() == 1));
-
- if (!positional) {
-
- // We ignore the '$'-prefixed names that are part of a DBRef, because
- // we don't have enough context here to validate that we have a proper
- // DBRef. Errors with the DBRef will be caught upstream when
- // okForStorage is invoked.
- //
- // TODO: We need to find a way to consolidate this checking with that
- // done in okForStorage. There is too much duplication between this
- // code and that code.
- const bool mightBePartOfDbRef = (i != 0) &&
- (part == "$db" ||
- part == "$id" ||
- part == "$ref");
-
- if (!mightBePartOfDbRef)
- return Status(ErrorCodes::DollarPrefixedFieldName,
- mongoutils::str::stream() << "The update path '"
- << field.dottedField()
- << "' contains an illegal field name "
- "(field name starts with '$').");
+ Status isUpdatable(const FieldRef& field) {
+ const size_t numParts = field.numParts();
- }
+ if (numParts == 0) {
+ return Status(ErrorCodes::EmptyFieldName,
+ "An empty update path is not valid.");
+ }
- }
+ for (size_t i = 0; i != numParts; ++i) {
+ const StringData part = field.getPart(i);
+ if (part.empty()) {
+ return Status(ErrorCodes::EmptyFieldName,
+ mongoutils::str::stream() << "The update path '"
+ << field.dottedField()
+ << "' contains an empty field, which is not allowed.");
}
-
- return Status::OK();
}
- } // namespace
-
- Status isUpdatable(const FieldRef& field) {
- return isUpdatable(field, false);
- }
-
- Status isUpdatableLegacy(const FieldRef& field) {
- return isUpdatable(field, true);
+ return Status::OK();
}
bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count) {
diff --git a/src/mongo/db/ops/field_checker.h b/src/mongo/db/ops/field_checker.h
index fbad64e7a42..544ea131074 100644
--- a/src/mongo/db/ops/field_checker.h
+++ b/src/mongo/db/ops/field_checker.h
@@ -39,20 +39,12 @@ namespace mongo {
/**
* Returns OK if all the below conditions on 'field' are valid:
* + Non-empty
- * + Not the _id field (or a subfield of the _id field, such as _id.x.y)
* + Does not start or end with a '.'
- * + Does not start with a $ (unless a DBRef part $id/$ref/$db not at the root)
* Otherwise returns a code indicating cause of failure.
*/
Status isUpdatable(const FieldRef& field);
/**
- * Same behavior of isUpdatable but allowing update fields to start with '$'. This
- * supports $unset on legacy fields.
- */
- Status isUpdatableLegacy(const FieldRef& field);
-
- /**
* Returns true, the position 'pos' of the first $-sign if present in 'fieldRef', and
* how many other $-signs were found in 'count'. Otherwise return false.
*
diff --git a/src/mongo/db/ops/field_checker_test.cpp b/src/mongo/db/ops/field_checker_test.cpp
index 9e4d87338c7..83c99bc4c9b 100644
--- a/src/mongo/db/ops/field_checker_test.cpp
+++ b/src/mongo/db/ops/field_checker_test.cpp
@@ -26,7 +26,6 @@ namespace {
using mongo::ErrorCodes;
using mongo::FieldRef;
using mongo::fieldchecker::isUpdatable;
- using mongo::fieldchecker::isUpdatableLegacy;
using mongo::fieldchecker::isPositional;
using mongo::Status;
@@ -51,10 +50,13 @@ namespace {
fieldRefDot.parse(".");
ASSERT_NOT_OK(isUpdatable(fieldRefDot));
+ /* TODO: Re-enable after review
FieldRef fieldRefDollar;
fieldRefDollar.parse("$");
ASSERT_NOT_OK(isUpdatable(fieldRefDollar));
+*/
+
FieldRef fieldRefADot;
fieldRefADot.parse("a.");
ASSERT_NOT_OK(isUpdatable(fieldRefADot));
@@ -68,154 +70,7 @@ namespace {
ASSERT_NOT_OK(isUpdatable(fieldRefEmptyMiddle));
}
- TEST(IsUpdatable, SpecialIDField) {
- FieldRef fieldRefID;
- fieldRefID.parse("_id");
- ASSERT_NOT_OK(isUpdatable(fieldRefID));
-
- FieldRef fieldRefIDX;
- fieldRefIDX.parse("_id.x");
- ASSERT_NOT_OK(isUpdatable(fieldRefIDX));
-
- FieldRef fieldRefXID;
- fieldRefXID.parse("x._id");
- ASSERT_OK(isUpdatable(fieldRefXID));
-
- FieldRef fieldRefXIDZ;
- fieldRefXIDZ.parse("x._id.z");
- ASSERT_OK(isUpdatable(fieldRefXIDZ));
- }
-
- TEST(IsUpdatable, PositionalFields) {
- FieldRef fieldRefXDollar;
- fieldRefXDollar.parse("x.$");
- ASSERT_OK(isUpdatable(fieldRefXDollar));
-
- FieldRef fieldRefXDollarZ;
- fieldRefXDollarZ.parse("x.$.z");
- ASSERT_OK(isUpdatable(fieldRefXDollarZ));
-
- // A document never starts with an array.
- FieldRef fieldRefDollarB;
- fieldRefDollarB.parse("$.b");
- ASSERT_NOT_OK(isUpdatable(fieldRefDollarB));
-
- FieldRef fieldRefDollar;
- fieldRefDollar.parse("$foo");
- ASSERT_NOT_OK(isUpdatable(fieldRefDollar));
- }
-
- TEST(IsUpdatable, DollarPrefixedDeepFields) {
- FieldRef fieldRefLateDollar;
- fieldRefLateDollar.parse("a.$b");
- ASSERT_NOT_OK(isUpdatable(fieldRefLateDollar));
- }
-
- // DBRef related tests
-
- // Allowed
- TEST(IsUpdatable, DBRefIdIgnored) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.$id");
- ASSERT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefDbIgnored) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.$db");
- ASSERT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefRefIgnored) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.$ref");
- ASSERT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefIdIgnoredNested) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.b.$id");
- ASSERT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefDbIgnoredNested) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.b.$db");
- ASSERT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefRefIgnoredNested) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.b.$ref");
- ASSERT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefIdRoot) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("$id");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefRefRoot) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("$ref");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefDbRoot) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("$db");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefIdLike) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("$idasd");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefRefLike) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("$refasd");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefDbLike) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("$dbasd");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefIdLikeNested) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.$idasd");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefRefLikeNested) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.$refasd");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefDbLikeNested) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.$dbasd");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefDbCase1) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.$Db");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
- TEST(IsUpdatable, DBRefDbCase2) {
- FieldRef fieldRefDBRef;
- fieldRefDBRef.parse("a.$DB");
- ASSERT_NOT_OK(isUpdatable(fieldRefDBRef));
- }
-
+ // Positional checks
TEST(isPositional, EntireArrayItem) {
FieldRef fieldRefPositional;
fieldRefPositional.parse("a.$");
@@ -245,30 +100,4 @@ namespace {
ASSERT_EQUALS(pos, 1u);
ASSERT_EQUALS(count, 2u);
}
-
- // Legacy tests which allow $prefix paths
- TEST(IsUpdatableLegacy, Basics) {
- FieldRef fieldRefDollar;
- fieldRefDollar.parse("$foo");
- ASSERT_OK(isUpdatableLegacy(fieldRefDollar));
- }
-
- TEST(IsUpdatableLegacy, DollarPrefixedNested) {
- FieldRef fieldRefLateDollar;
- fieldRefLateDollar.parse("a.$b");
- ASSERT_OK(isUpdatableLegacy(fieldRefLateDollar));
- }
-
- TEST(IsUpdatableLegacy, DollarPrefixedDeepNested) {
- FieldRef fieldRefLateDollar;
- fieldRefLateDollar.parse("a.b.$c");
- ASSERT_OK(isUpdatableLegacy(fieldRefLateDollar));
- }
-
- TEST(IsUpdatableLegacy, PositionalDollarPrefixed) {
- FieldRef fieldRefLateDollar;
- fieldRefLateDollar.parse("a.$.$b");
- ASSERT_OK(isUpdatableLegacy(fieldRefLateDollar));
- }
-
} // unnamed namespace
diff --git a/src/mongo/db/ops/modifier_object_replace.cpp b/src/mongo/db/ops/modifier_object_replace.cpp
index 7b312a34a99..a7092064043 100644
--- a/src/mongo/db/ops/modifier_object_replace.cpp
+++ b/src/mongo/db/ops/modifier_object_replace.cpp
@@ -91,24 +91,6 @@ namespace mongo {
<< modExpr.type());
}
- if (opts.enforceOkForStorage) {
- Status s = modExpr.embeddedObject().storageValid();
- if (!s.isOK())
- return s;
- } else {
- // storageValid checks for $-prefixed field names, so only run if we didn't do that.
- BSONObjIterator it(modExpr.embeddedObject());
- while (it.more()) {
- BSONElement elem = it.next();
- if (*elem.fieldName() == '$') {
- return Status(ErrorCodes::BadValue,
- str::stream() << "A replace document can't"
- " contain update modifiers: "
- << elem.fieldNameStringData());
- }
- }
- }
-
// We make a copy of the object here because the update driver does not guarantees, in
// the case of object replacement, that the modExpr is going to outlive this mod.
_val = modExpr.embeddedObject().getOwned();
@@ -165,6 +147,10 @@ namespace mongo {
// Do not duplicate _id field
if (srcIdElement.ok()) {
+ if (srcIdElement.compareWithBSONElement(dstIdElement, true) != 0) {
+ return Status(ErrorCodes::ImmutableField,
+ "The _id field cannot be changed.");
+ }
continue;
}
}
diff --git a/src/mongo/db/ops/modifier_object_replace_test.cpp b/src/mongo/db/ops/modifier_object_replace_test.cpp
index cd4d7c971b3..bfdd2f9dfc4 100644
--- a/src/mongo/db/ops/modifier_object_replace_test.cpp
+++ b/src/mongo/db/ops/modifier_object_replace_test.cpp
@@ -235,7 +235,16 @@ namespace {
ModifierInterface::ExecInfo execInfo;
ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
ASSERT_FALSE(execInfo.noOp);
+ ASSERT_NOT_OK(mod.apply());
+ }
+ TEST(IdImmutable, ReplaceIdNumberSameVal){
+ Document doc(fromjson("{_id:1, a:1}"));
+ Mod mod(fromjson("{_id:2}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
ASSERT_NOT_OK(mod.apply());
}
@@ -246,78 +255,7 @@ namespace {
ModifierInterface::ExecInfo execInfo;
ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
ASSERT_FALSE(execInfo.noOp);
-
ASSERT_NOT_OK(mod.apply());
}
- // Test for bad paths
- TEST(ValidatePath, FailDottedField){
- ModifierObjectReplace mod;
- BSONObj input = fromjson("{'a.a':10}");
- ASSERT_NOT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(ValidatePath, FailEmbeddedDottedField){
- ModifierObjectReplace mod;
- BSONObj input = fromjson("{a:{'a.a':10}}");
- ASSERT_NOT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(ValidatePath, FailDollarPrefixField){
- ModifierObjectReplace mod;
- BSONObj input = fromjson("{$a:10}");
- ASSERT_NOT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(ValidatePath, FailEmbeddedDollarPrefixField){
- ModifierObjectReplace mod;
- BSONObj input = fromjson("{a:{$foo:1}}");
- ASSERT_NOT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(ValidatePath, FailArrayIdField){
- ModifierObjectReplace mod;
- BSONObj input = fromjson("{_id:[9]}");
- ASSERT_NOT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- input = fromjson("{blah:1, _id:[9]}");
- ASSERT_NOT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(ValidatePath, FailRegexIdField){
- ModifierObjectReplace mod;
- BSONObj input = fromjson("{_id: /a/}");
- ASSERT_NOT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- input = fromjson("{foo:1, _id: /a/}");
- ASSERT_NOT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- // These are similar to ones above but are allowed for embedded doc/elements
- TEST(ValidatePath, EmbeddedArrayIdField){
- ModifierObjectReplace mod;
- BSONObj input = fromjson("{a: {_id:[10]}}");
- ASSERT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- input = fromjson("{a: {blah:1, _id:[10]}}");
- ASSERT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(ValidatePath, HasEmbeddedRegexIdField){
- ModifierObjectReplace mod;
- BSONObj input = fromjson("{a: {_id: /a/, hi:1}}");
- ASSERT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- input = fromjson("{a: {hi:1, _id: /a/}}");
- ASSERT_OK(mod.init(BSON("" << input).firstElement(),
- ModifierInterface::Options::normal()));
- }
-
} // unnamed namespace
diff --git a/src/mongo/db/ops/modifier_push.cpp b/src/mongo/db/ops/modifier_push.cpp
index c7e099c98f1..106965bfb6f 100644
--- a/src/mongo/db/ops/modifier_push.cpp
+++ b/src/mongo/db/ops/modifier_push.cpp
@@ -114,18 +114,6 @@ namespace mongo {
}
}
- // Check if no invalid data (such as fields with '$'s) are being used in the $each
- // clause.
- BSONObjIterator itEach(eachElem->embeddedObject());
- while (itEach.more()) {
- BSONElement eachItem = itEach.next();
- if (eachItem.type() == Object || eachItem.type() == Array) {
- Status s = eachItem.Obj().storageValidEmbedded();
- if (!s.isOK())
- return s;
- }
- }
-
// Slice, sort, position are optional and may be present in any order.
bool seenSlice = false;
bool seenSort = false;
@@ -244,12 +232,6 @@ namespace mongo {
switch (modExpr.type()) {
case Array:
- {
- Status s = modExpr.Obj().storageValidEmbedded();
- if (!s.isOK())
- return s;
- }
-
if (_pushMode == PUSH_ALL) {
_eachMode = true;
Status status = parseEachMode(PUSH_ALL,
@@ -289,10 +271,6 @@ namespace mongo {
}
}
else {
- Status s = modExpr.Obj().storageValidEmbedded();
- if (!s.isOK())
- return s;
-
_val = modExpr;
}
break;
diff --git a/src/mongo/db/ops/modifier_push_test.cpp b/src/mongo/db/ops/modifier_push_test.cpp
index ba8d602d298..88811026205 100644
--- a/src/mongo/db/ops/modifier_push_test.cpp
+++ b/src/mongo/db/ops/modifier_push_test.cpp
@@ -168,13 +168,6 @@ namespace {
ModifierInterface::Options::normal()));
}
- TEST(Init, SimplePushNotOkForStorage) {
- BSONObj modObj = fromjson("{$push: {$inc: {x: 1}}}");
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
//
// If present, is the $each clause valid?
//
@@ -209,15 +202,6 @@ namespace {
ModifierInterface::Options::normal()));
}
- // TODO Should we check this? We currently don't.
- // TEST(Init, PushEachDotted) {
- // // Push does not create structure; so dotted fields are invalid
- // BSONObj modObj = fromjson("{$push: {x: {$each: [{'a.b': 1}]}}}}");
- // ModifierPush mod;
- // ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- // ModifierInterface::Options::normal()));
- // }
-
TEST(Init, PushEachEmpty) {
BSONObj modObj = fromjson("{$push: {x: {$each: []}}}");
ModifierPush mod;
@@ -225,14 +209,6 @@ namespace {
ModifierInterface::Options::normal()));
}
- TEST(Init, PushEachWithNotOkForStorage) {
- // $each items shouldn't contain '$' in field names.
- BSONObj modObj = fromjson("{$push: {x: {$each: [{$inc: {b: 1}}]}}}");
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
TEST(Init, PushEachInvalidType) {
// $each must be an array.
BSONObj modObj = fromjson("{$push: {x: {$each: {b: 1}}}}");
diff --git a/src/mongo/db/ops/modifier_rename.cpp b/src/mongo/db/ops/modifier_rename.cpp
index d2114e598b1..4cb643f297b 100644
--- a/src/mongo/db/ops/modifier_rename.cpp
+++ b/src/mongo/db/ops/modifier_rename.cpp
@@ -86,13 +86,11 @@ namespace mongo {
// Extract the field names from the mod expression
_fromFieldRef.parse(modExpr.fieldName());
- // The 'from' field is checked with legacy so that we can rename away from malformed values.
- Status status = fieldchecker::isUpdatableLegacy(_fromFieldRef);
+ Status status = fieldchecker::isUpdatable(_fromFieldRef);
if (!status.isOK())
return status;
_toFieldRef.parse(modExpr.String());
- // The 'to' field is checked normally so we can't create new malformed values.
status = fieldchecker::isUpdatable(_toFieldRef);
if (!status.isOK())
return status;
@@ -159,15 +157,16 @@ namespace mongo {
// Ensure no array in ancestry if what we found is not at the root
mutablebson::Element curr = _preparedState->fromElemFound.parent();
- while (curr.ok()) {
- if (curr.getType() == Array)
- return Status(ErrorCodes::BadValue,
- str::stream() << "The source field cannot be an array element, '"
- << _fromFieldRef.dottedField() << "' in doc with "
- << findElementNamed(root.leftChild(), "_id").toString()
- << " has an array field called '" << curr.getFieldName() << "'");
- curr = curr.parent();
- }
+ if (curr != curr.getDocument().root())
+ while (curr.ok() && (curr != curr.getDocument().root())) {
+ if (curr.getType() == Array)
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The source field cannot be an array element, '"
+ << _fromFieldRef.dottedField() << "' in doc with "
+ << findElementNamed(root.leftChild(), "_id").toString()
+ << " has an array field called '" << curr.getFieldName() << "'");
+ curr = curr.parent();
+ }
// "To" side validation below
@@ -176,29 +175,30 @@ namespace mongo {
&_preparedState->toIdxFound,
&_preparedState->toElemFound);
- // FindLongestPrefix may say the path does not exist at all, which is fine here, or
- // that the path was not viable or otherwise wrong, in which case, the mod cannot
- // proceed.
+ // FindLongestPrefix may return 'not viable' or another error, in which case we cannot proceed.
if (status.code() == ErrorCodes::NonExistentPath) {
- _preparedState->toElemFound = root.getDocument().end();
+ // Not an error condition as we will create the "to" path as needed.
} else if (!status.isOK()) {
return status;
}
- // Ensure no array in ancestry of to Element
- // Set to either parent, or node depending if the full path element was found
- curr = (_preparedState->toElemFound != root.getDocument().end() ?
- _preparedState->toElemFound.parent() :
- _preparedState->toElemFound);
- curr = _preparedState->toElemFound;
- while (curr.ok()) {
- if (curr.getType() == Array)
- return Status(ErrorCodes::BadValue,
- str::stream() << "The destination field cannot be an array element, '"
- << _fromFieldRef.dottedField() << "' in doc with "
- << findElementNamed(root.leftChild(), "_id").toString()
- << " has an array field called '" << curr.getFieldName() << "'");
- curr = curr.parent();
+ const bool destExists = _preparedState->toElemFound.ok() &&
+ (_preparedState->toIdxFound == (_toFieldRef.numParts()-1));
+
+ // Ensure no array in ancestry of "to" Element
+ // Set to either the parent, or the node itself, depending on whether the full path element was found
+ curr = (destExists ? _preparedState->toElemFound.parent() : _preparedState->toElemFound);
+ if (curr != curr.getDocument().root()) {
+ while (curr.ok()) {
+ if (curr.getType() == Array)
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "The destination field cannot be an array element, '"
+ << _fromFieldRef.dottedField() << "' in doc with "
+ << findElementNamed(root.leftChild(), "_id").toString()
+ << " has an array field called '" << curr.getFieldName() << "'");
+ curr = curr.parent();
+ }
}
// We register interest in the field name. The driver needs this info to sort out if
diff --git a/src/mongo/db/ops/modifier_rename_test.cpp b/src/mongo/db/ops/modifier_rename_test.cpp
index 6020216f6ab..0c1b3310264 100644
--- a/src/mongo/db/ops/modifier_rename_test.cpp
+++ b/src/mongo/db/ops/modifier_rename_test.cpp
@@ -88,9 +88,8 @@ namespace {
/**
* These test negative cases:
* -- No '$' support for positional operator
- * -- Cannot move immutable field, or parts, of '_id' field
* -- No empty field names (ex. .a, b. )
- * -- Can't rename to an invalid fieldname.
+ * -- Can't rename to an invalid fieldname (empty fieldname part)
*/
TEST(InvalidInit, FromDbTests) {
ModifierRename mod;
@@ -98,14 +97,6 @@ namespace {
ModifierInterface::Options::normal()));
ASSERT_NOT_OK(mod.init(fromjson("{'a':'b.$'}").firstElement(),
ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'_id.a':'b'}").firstElement(),
- ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'b':'_id.a'}").firstElement(),
- ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'_id.a':'_id.b'}").firstElement(),
- ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'_id.b':'_id.a'}").firstElement(),
- ModifierInterface::Options::normal()));
ASSERT_NOT_OK(mod.init(fromjson("{'.b':'a'}").firstElement(),
ModifierInterface::Options::normal()));
ASSERT_NOT_OK(mod.init(fromjson("{'b.':'a'}").firstElement(),
@@ -114,8 +105,6 @@ namespace {
ModifierInterface::Options::normal()));
ASSERT_NOT_OK(mod.init(fromjson("{'b':'a.'}").firstElement(),
ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'a':'$a'}").firstElement(),
- ModifierInterface::Options::normal()));
}
TEST(MissingFrom, InitPrepLog) {
@@ -321,6 +310,51 @@ namespace {
ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
}
+ TEST(Arrays, ReplaceArrayField) {
+ Document doc(fromjson("{a: 2, b: []}"));
+ Mod setMod(fromjson("{$rename: {'a':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{b:2}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'b': 2}, $unset: {'a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+ }
+
+
+ TEST(Arrays, ReplaceWithArrayField) {
+ Document doc(fromjson("{a: [], b: 2}"));
+ Mod setMod(fromjson("{$rename: {'a':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{b:[]}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'b': []}, $unset: {'a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+ }
+
TEST(LegacyData, CanRenameFromInvalidFieldName) {
Document doc(fromjson("{$a: 2}"));
Mod setMod(fromjson("{$rename: {'$a':'a'}}"));
diff --git a/src/mongo/db/ops/modifier_set.cpp b/src/mongo/db/ops/modifier_set.cpp
index 65033091e45..3f3eee41e27 100644
--- a/src/mongo/db/ops/modifier_set.cpp
+++ b/src/mongo/db/ops/modifier_set.cpp
@@ -109,25 +109,9 @@ namespace mongo {
// value analysis
//
- // Is the target set value safe?
- switch (modExpr.type()) {
-
- case EOO:
+ if (!modExpr.ok())
return Status(ErrorCodes::BadValue, "cannot $set an empty value");
- case Object:
- case Array:
- if (opts.enforceOkForStorage) {
- Status s = modExpr.Obj().storageValidEmbedded();
- if (!s.isOK())
- return s;
- }
- break;
-
- default:
- break;
- }
-
_val = modExpr;
_modOptions = opts;
diff --git a/src/mongo/db/ops/modifier_set_test.cpp b/src/mongo/db/ops/modifier_set_test.cpp
index df47c6ba012..48dbf556ba3 100644
--- a/src/mongo/db/ops/modifier_set_test.cpp
+++ b/src/mongo/db/ops/modifier_set_test.cpp
@@ -103,13 +103,6 @@ namespace {
ModifierInterface::Options::normal() ));
}
- TEST(Init, NotOkForStorage) {
- BSONObj modObj = fromjson("{$set: {a: {$inc: {b: 1}}}}");
- ModifierSet mod;
- ASSERT_NOT_OK(mod.init(modObj["$set"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal() ));
- }
-
//
// Simple Mods
//
diff --git a/src/mongo/db/ops/modifier_unset.cpp b/src/mongo/db/ops/modifier_unset.cpp
index 48516f825a2..2e55fe0b5e8 100644
--- a/src/mongo/db/ops/modifier_unset.cpp
+++ b/src/mongo/db/ops/modifier_unset.cpp
@@ -83,7 +83,7 @@ namespace mongo {
// Perform standard field name and updateable checks.
_fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatableLegacy(_fieldRef);
+ Status status = fieldchecker::isUpdatable(_fieldRef);
if (! status.isOK()) {
return status;
}
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 50de9416777..30a65eb855a 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -34,6 +34,7 @@
#include <cstring> // for memcpy
+#include "mongo/bson/mutable/algorithm.h"
#include "mongo/bson/mutable/damage_vector.h"
#include "mongo/bson/mutable/document.h"
#include "mongo/client/dbclientinterface.h"
@@ -41,54 +42,232 @@
#include "mongo/db/index_set.h"
#include "mongo/db/namespace_details.h"
#include "mongo/db/ops/update_driver.h"
+#include "mongo/db/ops/update_lifecycle.h"
#include "mongo/db/pagefault.h"
#include "mongo/db/pdfile.h"
+#include "mongo/db/query_optimizer.h"
+#include "mongo/db/query_runner.h"
#include "mongo/db/query/new_find.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/runner_yield_policy.h"
-#include "mongo/db/query_optimizer.h"
-#include "mongo/db/query_runner.h"
#include "mongo/db/queryutil.h"
+#include "mongo/db/repl/oplog.h"
#include "mongo/db/storage/record.h"
#include "mongo/db/structure/collection.h"
-#include "mongo/db/repl/oplog.h"
#include "mongo/platform/unordered_set.h"
namespace mongo {
+ namespace mb = mutablebson;
namespace {
+ const char idFieldName[] = "_id";
+
// TODO: Make this a function on NamespaceString, or make it cleaner.
- inline void validateUpdate( const char* ns , const BSONObj& updateobj, const BSONObj& patternOrig ) {
- uassert( 10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0 );
- if ( strstr(ns, ".system.") ) {
+ inline void validateUpdate(const char* ns ,
+ const BSONObj& updateobj,
+ const BSONObj& patternOrig) {
+ uassert(10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0);
+ if (strstr(ns, ".system.")) {
/* dm: it's very important that system.indexes is never updated as IndexDetails
has pointers into it */
- uassert( 10156,
+ uassert(10156,
str::stream() << "cannot update system collection: "
<< ns << " q: " << patternOrig << " u: " << updateobj,
- legalClientSystemNS( ns , true ) );
+ legalClientSystemNS(ns , true));
}
}
/**
- * return a BSONObj with the _id field of the doc passed in. If no _id and multi, error.
+ * This will verify that all updated fields are
+ * 1.) Valid for storage (checking parent to make sure things like DBRefs are valid)
+ * 2.) Compare updated immutable fields do not change values
+ *
+ * If updateFields is empty then it was replacement and/or we need to check all fields
*/
- BSONObj makeOplogEntryQuery(const BSONObj doc, bool multi) {
- BSONObjBuilder idPattern;
- BSONElement id;
- // NOTE: If the matching object lacks an id, we'll log
- // with the original pattern. This isn't replay-safe.
- // It might make sense to suppress the log instead
- // if there's no id.
- if ( doc.getObjectID( id ) ) {
- idPattern.append( id );
- return idPattern.obj();
+ inline Status validate(const bool idRequired,
+ const BSONObj& original,
+ const FieldRefSet& updatedFields,
+ const mb::Document& updated,
+ const std::vector<FieldRef*>* immutableAndSingleValueFields,
+ const ModifierInterface::Options& opts) {
+
+ LOG(3) << "update validate options -- "
+ << " id required: " << idRequired
+ << " updatedFields: " << updatedFields
+ << " immutableAndSingleValueFields.size:"
+ << (immutableAndSingleValueFields ? immutableAndSingleValueFields->size() : 0)
+ << " fromRepl: " << opts.fromReplication
+ << " validate:" << opts.enforceOkForStorage;
+
+ // 1.) Loop through each updated field and validate for storage
+ // and detect immutable updates
+
+ // Once we have checked the root, there is no need to do it again
+ bool checkedRoot = false;
+
+ // TODO: Replace with mutablebson implementation -- this is wasteful to make a copy.
+ BSONObj doc = updated.getObject();
+
+ // The set of possibly changed immutable fields -- we will need to check their vals
+ FieldRefSet changedImmutableFields;
+
+ // Check to see if there were no fields specified or if we are not validating
+ // This is the case of a range query, or a query that didn't result in saved fields
+ if (updatedFields.empty() || !opts.enforceOkForStorage) {
+ if (opts.enforceOkForStorage) {
+ // No specific fields were updated so the whole doc must be checked
+ Status s = doc.storageValid(true);
+ if (!s.isOK())
+ return s;
+ }
+
+ // Check all immutable fields
+ if (immutableAndSingleValueFields)
+ changedImmutableFields.fillFrom(*immutableAndSingleValueFields);
}
else {
- uassert( 10157, "multi-update requires all modified objects to have an _id" , ! multi );
- return doc;
+
+ // TODO: Change impl so we don't need to create a new FieldRefSet
+ // -- move all conflict logic into static function on FieldRefSet?
+ FieldRefSet immutableFieldRef;
+ if (immutableAndSingleValueFields)
+ immutableFieldRef.fillFrom(*immutableAndSingleValueFields);
+
+
+ FieldRefSet::const_iterator where = updatedFields.begin();
+ const FieldRefSet::const_iterator end = updatedFields.end();
+ for( ; where != end; ++where) {
+ const FieldRef& current = **where;
+
+ //TODO: don't check paths more than once
+
+ const bool isTopLevelField = (current.numParts() == 1);
+ if (isTopLevelField) {
+ // We check the top level since the current implementation checks BSONObj,
+ // not BSONElements. NOTE: in the mutablebson version we can just check elem
+ if (!checkedRoot) {
+ // Check the root level (top level fields) only once, and don't go deep
+ Status s = doc.storageValid(false);
+ if (!s.isOK()) {
+ return s;
+ }
+ checkedRoot = true;
+ }
+
+ // Check if the updated field conflicts with immutable fields
+ immutableFieldRef.getConflicts(&current, &changedImmutableFields);
+
+ // Traverse (deep)
+ BSONElement elem = doc.getFieldDotted(current.dottedField());
+ if (elem.isABSONObj()) {
+ Status s = elem.embeddedObject().storageValid(true);
+ if (!s.isOK()) {
+ return s;
+ }
+ }
+ }
+ else {
+ // Remove the right-most part, to get the parent.
+ // "a.b.c" -> "a.b"
+ StringData path = current.dottedField().substr(
+ 0,
+ current.dottedField().size() -
+ current.getPart(current.numParts() - 1).size() - 1);
+
+ BSONElement elem = doc.getFieldDotted(path);
+ Status s = elem.embeddedObject().storageValid(true);
+ if (!s.isOK()) {
+ return s;
+ }
+
+ }
+ }
+ }
+
+ LOG(4) << "Changed immutable fields: " << changedImmutableFields;
+ // 2.) Now compare values of the changed immutable fields (to make sure they haven't)
+
+ const mutablebson::ConstElement newIdElem = updated.root()[idFieldName];
+
+ // Add _id to fields to check since it too is immutable
+ FieldRef idFR;
+ idFR.parse(idFieldName);
+ changedImmutableFields.keepShortest(&idFR);
+
+ FieldRefSet::const_iterator where = changedImmutableFields.begin();
+ const FieldRefSet::const_iterator end = changedImmutableFields.end();
+ for( ; where != end; ++where ) {
+ const FieldRef& current = **where;
+
+ // Find the updated field in the updated document.
+ mutablebson::ConstElement newElem = updated.root();
+ size_t currentPart = 0;
+ while (newElem.ok() && currentPart < current.numParts())
+ newElem = newElem[current.getPart(currentPart++)];
+
+ if (!newElem.ok()) {
+ if (original.isEmpty()) {
+ // If the _id is missing and not required, then skip this check
+ if (!(current.dottedField() == idFieldName && idRequired))
+ return Status(ErrorCodes::NoSuchKey,
+ mongoutils::str::stream()
+ << "After applying the update, the new"
+ << " document was missing the '"
+ << current.dottedField()
+ << "' (required and immutable) field.");
+
+ }
+ else {
+ if (current.dottedField() != idFieldName ||
+ (current.dottedField() != idFieldName && idRequired))
+ return Status(ErrorCodes::ImmutableField,
+ mongoutils::str::stream()
+ << "After applying the update to the document with "
+ << newIdElem.toString()
+ << ", the '" << current.dottedField()
+ << "' (required and immutable) field was "
+ "found to have been removed --"
+ << original);
+ }
+ }
+ else {
+
+ // Find the potentially affected field in the original document.
+ const BSONElement oldElem = original.getFieldDotted(current.dottedField());
+ const BSONElement oldIdElem = original.getField(idFieldName);
+
+ // Ensure no arrays since neither _id nor shard keys can be in an array, or one.
+ mb::ConstElement currElem = newElem;
+ while (currElem.ok()) {
+ if (currElem.getType() == Array) {
+ return Status(ErrorCodes::NotSingleValueField,
+ mongoutils::str::stream()
+ << "After applying the update to the document {"
+ << (oldIdElem.ok() ? oldIdElem.toString() :
+ newIdElem.toString())
+ << " , ...}, the (immutable) field '"
+ << current.dottedField()
+ << "' was found to be an array or array descendant.");
+ }
+ currElem = currElem.parent();
+ }
+
+ // If we have both (old and new), compare them. If we just have new we are good
+ if (oldElem.ok() && newElem.compareWithBSONElement(oldElem, false) != 0) {
+ return Status(ErrorCodes::ImmutableField,
+ mongoutils::str::stream()
+ << "After applying the update to the document {"
+ << (oldIdElem.ok() ? oldIdElem.toString() :
+ newIdElem.toString())
+ << " , ...}, the (immutable) field '" << current.dottedField()
+ << "' was found to have been altered to "
+ << newElem.toString());
+ }
+ }
}
+
+ return Status::OK();
}
} // namespace
@@ -100,20 +279,21 @@ namespace mongo {
// Config db docs shouldn't get checked for valid field names since the shard key can have
// a dot (".") in it.
bool shouldValidate = !(request.isFromReplication() ||
- request.getNamespaceString().isConfigDB());
+ request.getNamespaceString().isConfigDB() ||
+ request.isFromMigration());
// TODO: Consider some sort of unification between the UpdateDriver, ModifierInterface
// and UpdateRequest structures.
UpdateDriver::Options opts;
opts.multi = request.isMulti();
opts.upsert = request.isUpsert();
- opts.logOp = request.shouldUpdateOpLog();
- opts.modOptions = ModifierInterface::Options( request.isFromReplication(), shouldValidate );
- UpdateDriver driver( opts );
+ opts.logOp = request.shouldCallLogOp();
+ opts.modOptions = ModifierInterface::Options(request.isFromReplication(), shouldValidate);
+ UpdateDriver driver(opts);
- Status status = driver.parse( request.getUpdates() );
- if ( !status.isOK() ) {
- uasserted( 16840, status.reason() );
+ Status status = driver.parse(request.getUpdates());
+ if (!status.isOK()) {
+ uasserted(16840, status.reason());
}
return update(request, opDebug, &driver);
@@ -124,19 +304,20 @@ namespace mongo {
LOG(3) << "processing update : " << request;
const NamespaceString& nsString = request.getNamespaceString();
- validateUpdate( nsString.ns().c_str(), request.getUpdates(), request.getQuery() );
+ validateUpdate(nsString.ns().c_str(), request.getUpdates(), request.getQuery());
- Collection* collection = cc().database()->getCollection( nsString.ns() );
+ Collection* collection = cc().database()->getCollection(nsString.ns());
// TODO: This seems a bit circuitious.
opDebug->updateobj = request.getUpdates();
- if ( collection )
- driver->refreshIndexKeys( collection->infoCache()->indexKeys() );
+ if (request.getLifecycle()) {
+ IndexPathSet indexes;
+ request.getLifecycle()->getIndexKeys(&indexes);
+ driver->refreshIndexKeys(indexes);
+ }
CanonicalQuery* cq;
- // We pass -limit because a positive limit means 'batch size' but negative limit is a
- // hard limit.
if (!CanonicalQuery::canonicalize(nsString, request.getQuery(), &cq).isOK()) {
uasserted(17242, "could not canonicalize query " + request.getQuery().toString());
}
@@ -160,7 +341,7 @@ namespace mongo {
// We record that this will not be an upsert, in case a mod doesn't want to be applied
// when in strict update mode.
- driver->setContext( ModifierInterface::ExecInfo::UPDATE_CONTEXT );
+ driver->setContext(ModifierInterface::ExecInfo::UPDATE_CONTEXT);
int numMatched = 0;
unordered_set<DiskLoc, DiskLoc::Hasher> updatedLocs;
@@ -179,9 +360,10 @@ namespace mongo {
DiskLoc loc;
Runner::RunnerState state;
while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&oldObj, &loc))) {
- if ( !isolated && opDebug->nscanned != 0 ) {
+ if (!isolated && opDebug->nscanned != 0) {
if (yieldPolicy.shouldYield()) {
if (!yieldPolicy.yieldAndCheckIfOK(runner.get())) {
+ // TODO: Error?
break;
}
@@ -190,13 +372,18 @@ namespace mongo {
// them here. If we can't do so, escape the update loop. Otherwise, refresh
// the driver so that it knows about what is currently indexed.
- collection = cc().database()->getCollection( nsString.ns() );
- if (NULL == collection) {
- break;
+ const UpdateLifecycle* lifecycle = request.getLifecycle();
+ collection = cc().database()->getCollection(nsString.ns());
+ if (!collection || (lifecycle && !lifecycle->canContinue())) {
+ uasserted(17270,
+ "Update aborted due to invalid state transitions after yield.");
}
- // TODO: This copies the index keys, but it may not need to do so.
- driver->refreshIndexKeys( collection->infoCache()->indexKeys() );
+ if (lifecycle && lifecycle->canContinue()) {
+ IndexPathSet indexes;
+ lifecycle->getIndexKeys(&indexes);
+ driver->refreshIndexKeys(indexes);
+ }
}
}
@@ -218,24 +405,39 @@ namespace mongo {
// place", that is, some values of the old document just get adjusted without any
// change to the binary layout on the bson layer. It may be that a whole new
// document is needed to accomodate the new bson layout of the resulting document.
- doc.reset( oldObj, mutablebson::Document::kInPlaceEnabled );
+ doc.reset(oldObj, mutablebson::Document::kInPlaceEnabled);
BSONObj logObj;
// If there was a matched field, obtain it.
- // XXX: do we always want to do this additional match?
+ // TODO: Only do this when needed (need requirements from update_driver/mods)
MatchDetails matchDetails;
matchDetails.requestElemMatchKey();
+ // TODO: Find out if we can move this to the query side so we don't need to double-match
verify(cq->root()->matchesBSON(oldObj, &matchDetails));
string matchedField;
if (matchDetails.hasElemMatchKey())
matchedField = matchDetails.elemMatchKey();
- Status status = driver->update( matchedField, &doc, &logObj );
- if ( !status.isOK() ) {
- uasserted( 16837, status.reason() );
+ FieldRefSet updatedFields;
+ Status status = driver->update(matchedField, &doc, &logObj, &updatedFields);
+ if (!status.isOK()) {
+ uasserted(16837, status.reason());
+ }
+
+ dassert(collection->details());
+ const bool idRequired = collection->details()->haveIdIndex();
+
+ // Move _id as first element
+ mb::Element idElem = mb::findFirstChildNamed(doc.root(), idFieldName);
+ if (idElem.ok()) {
+ if (idElem.leftSibling().ok()) {
+ uassertStatusOK(idElem.remove());
+ uassertStatusOK(doc.root().pushFront(idElem));
+ }
}
+
// If the driver applied the mods in place, we can ask the mutable for what
// changed. We call those changes "damages". :) We use the damages to inform the
// journal what was changed, and then apply them to the original document
@@ -251,16 +453,31 @@ namespace mongo {
const char* source = NULL;
bool inPlace = doc.getInPlaceUpdates(&damages, &source);
- // If something changed in the document, verify that no shard keys were altered.
- if ((!inPlace || !damages.empty()) && driver->modsAffectShardKeys())
- uassertStatusOK( driver->checkShardKeysUnaltered (oldObj, doc ) );
+ // If something changed in the document, verify that no immutable fields were changed
+ // and data is valid for storage.
+ if ((!inPlace || !damages.empty()) ) {
+ if (!(request.isFromReplication() || request.isFromMigration())) {
+ const std::vector<FieldRef*>* immutableFields = NULL;
+ if (const UpdateLifecycle* lifecycle = request.getLifecycle())
+ immutableFields = lifecycle->getImmutableFields();
+
+ uassertStatusOK(validate(idRequired,
+ oldObj,
+ updatedFields,
+ doc,
+ immutableFields,
+ driver->modOptions()) );
+ }
+ }
runner->saveState();
- if ( inPlace && !driver->modsAffectIndices() ) {
+ if (inPlace && !driver->modsAffectIndices()) {
+
// If a set of modifiers were all no-ops, we are still 'in place', but there is
// no work to do, in which case we want to consider the object unchanged.
if (!damages.empty() ) {
+
collection->details()->paddingFits();
// All updates were in place. Apply them via durability and writing pointer.
@@ -282,11 +499,11 @@ namespace mongo {
// The updates were not in place. Apply them through the file manager.
newObj = doc.getObject();
- StatusWith<DiskLoc> res = collection->updateDocument( loc,
- newObj,
- true,
- opDebug );
- uassertStatusOK( res.getStatus() );
+ StatusWith<DiskLoc> res = collection->updateDocument(loc,
+ newObj,
+ true,
+ opDebug);
+ uassertStatusOK(res.getStatus());
DiskLoc newLoc = res.getValue();
// If we've moved this object to a new location, make sure we don't apply
@@ -295,16 +512,16 @@ namespace mongo {
// We also take note that the diskloc if the updates are affecting indices.
// Chances are that we're traversing one of them and they may be multi key and
// therefore duplicate disklocs.
- if ( newLoc != loc || driver->modsAffectIndices() ) {
- updatedLocs.insert( newLoc );
+ if (newLoc != loc || driver->modsAffectIndices()) {
+ updatedLocs.insert(newLoc);
}
objectWasChanged = true;
}
- // Log Obj
- if ( request.shouldUpdateOpLog() ) {
- if ( driver->isDocReplacement() || !logObj.isEmpty() ) {
+ // Call logOp if requested.
+ if (request.shouldCallLogOp()) {
+ if (driver->isDocReplacement() || !logObj.isEmpty()) {
BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request.isMulti());
logOp("u", nsString.ns().c_str(), logObj , &idQuery,
NULL, request.isFromMigration(), &newObj);
@@ -329,10 +546,10 @@ namespace mongo {
// TODO: Can this be simplified?
if ((numMatched > 0) || (numMatched == 0 && !request.isUpsert()) ) {
opDebug->nupdated = numMatched;
- return UpdateResult( numMatched > 0 /* updated existing object(s) */,
- !driver->isDocReplacement() /* $mod or obj replacement */,
- numMatched /* # of docments update, even no-ops */,
- BSONObj() );
+ return UpdateResult(numMatched > 0 /* updated existing object(s) */,
+ !driver->isDocReplacement() /* $mod or obj replacement */,
+ numMatched /* # of documents updated, even no-ops */,
+ BSONObj());
}
//
@@ -345,92 +562,112 @@ namespace mongo {
// as an insert in the oplog. We don't need the driver's help to build the
// oplog record, then. We also set the context of the update driver to the INSERT_CONTEXT.
// Some mods may only work in that context (e.g. $setOnInsert).
- driver->setLogOp( false );
- driver->setContext( ModifierInterface::ExecInfo::INSERT_CONTEXT );
+ driver->setLogOp(false);
+ driver->setContext(ModifierInterface::ExecInfo::INSERT_CONTEXT);
// Reset the document we will be writing to
doc.reset();
- if ( request.getQuery().hasElement("_id") ) {
- uassertStatusOK(doc.root().appendElement(request.getQuery().getField("_id")));
- }
-
// This remains the empty object in the case of an object replacement, but in the case
// of an upsert where we are creating a base object from the query and applying mods,
- // we capture the query as the original so that we can detect shard key mutations.
+ // we capture the query as the original so that we can detect immutable field mutations.
BSONObj original = BSONObj();
- // If this is a $mod base update, we need to generate a document by examining the
- // query and the mods. Otherwise, we can use the object replacement sent by the user
- // update command that was parsed by the driver before.
- // In the following block we handle the query part, and then do the regular mods after.
- if ( *request.getUpdates().firstElementFieldName() == '$' ) {
- original = request.getQuery();
- uassertStatusOK(UpdateDriver::createFromQuery(original, doc));
+ // Calling populateDocumentWithQueryFields will populate the 'doc' with fields from the
+ // query which creates the base of the update for the inserted doc (because upsert was true)
+ uassertStatusOK(driver->populateDocumentWithQueryFields(request.getQuery(), doc));
+ if (!driver->isDocReplacement()) {
opDebug->fastmodinsert = true;
+ // We need all the fields from the query to compare against for validation below.
+ original = doc.getObject();
}
// Apply the update modifications and then log the update as an insert manually.
- Status status = driver->update( StringData(), &doc, NULL /* no oplog record */);
- if ( !status.isOK() ) {
- uasserted( 16836, status.reason() );
+ FieldRefSet updatedFields;
+ Status status = driver->update(StringData(), &doc, NULL, &updatedFields);
+ if (!status.isOK()) {
+ uasserted(16836, status.reason());
}
- // Validate that the object replacement or modifiers resulted in a document
- // that contains all the shard keys.
- uassertStatusOK( driver->checkShardKeysUnaltered(original, doc) );
-
- BSONObj newObj = doc.getObject();
+ if (!collection) {
+ collection = cc().database()->getCollection(request.getNamespaceString().ns());
+ if (!collection) {
+ collection = cc().database()->createCollection(request.getNamespaceString().ns());
+ }
+ }
- if ( newObj["_id"].eoo() &&
- ( collection == NULL || collection->getIndexCatalog()->findIdIndex() ) ) {
- // TODO: this should move up to before we call doc.getObject()
- // and be done on mutable
- BSONObjBuilder b;
+ dassert(collection->details());
+ const bool idRequired = collection->details()->haveIdIndex();
- OID oid;
- oid.init();
- b.appendOID( "_id", &oid );
+ mb::Element idElem = mb::findFirstChildNamed(doc.root(), idFieldName);
- b.appendElements( newObj );
- newObj = b.obj();
+ // Move _id as first element if it exists
+ if (idElem.ok()) {
+ if (idElem.leftSibling().ok()) {
+ uassertStatusOK(idElem.remove());
+ uassertStatusOK(doc.root().pushFront(idElem));
+ }
}
-
- if ( !collection ) {
- collection = cc().database()->getCollection( request.getNamespaceString().ns() );
- if ( !collection ) {
- collection = cc().database()->createCollection( request.getNamespaceString().ns() );
+ else {
+ // Create _id if an _id is required but the document does not currently have one.
+ if (idRequired) {
+ // TODO: don't search for _id again, get it from above somewhere
+ idElem = doc.makeElementNewOID(idFieldName);
+ if (!idElem.ok())
+ uasserted(17268, "Could not create new _id ObjectId element.");
+ Status s = doc.root().pushFront(idElem);
+ if (!s.isOK())
+ uasserted(17269,
+ str::stream() << "Could not create new _id for insert: " << s.reason());
}
}
- StatusWith<DiskLoc> newLoc = collection->insertDocument( newObj, !request.isGod() /* enforceQuota */ );
- uassertStatusOK( newLoc.getStatus() );
- if ( request.shouldUpdateOpLog() ) {
- logOp( "i", nsString.ns().c_str(), newObj,
- NULL, NULL, request.isFromMigration(), &newObj );
+ // Validate that the object replacement or modifiers resulted in a document
+ // that contains all the immutable keys and can be stored.
+ if (!(request.isFromReplication() || request.isFromMigration())){
+ const std::vector<FieldRef*>* immutableFields = NULL;
+ if (const UpdateLifecycle* lifecycle = request.getLifecycle())
+ immutableFields = lifecycle->getImmutableFields();
+
+ uassertStatusOK(validate(idRequired,
+ original,
+ updatedFields,
+ doc,
+ immutableFields,
+ driver->modOptions()) );
+ }
+
+ // Insert the doc
+ BSONObj newObj = doc.getObject();
+ StatusWith<DiskLoc> newLoc = collection->insertDocument(newObj,
+ !request.isGod() /*enforceQuota*/);
+ uassertStatusOK(newLoc.getStatus());
+ if (request.shouldCallLogOp()) {
+ logOp("i", nsString.ns().c_str(), newObj,
+ NULL, NULL, request.isFromMigration(), &newObj);
}
opDebug->nupdated = 1;
- return UpdateResult( false /* updated a non existing document */,
- !driver->isDocReplacement() /* $mod or obj replacement? */,
- 1 /* count of updated documents */,
- newObj /* object that was upserted */ );
+ return UpdateResult(false /* updated a non existing document */,
+ !driver->isDocReplacement() /* $mod or obj replacement? */,
+ 1 /* count of updated documents */,
+ newObj /* object that was upserted */ );
}
- BSONObj applyUpdateOperators( const BSONObj& from, const BSONObj& operators ) {
+ BSONObj applyUpdateOperators(const BSONObj& from, const BSONObj& operators) {
UpdateDriver::Options opts;
opts.multi = false;
opts.upsert = false;
- UpdateDriver driver( opts );
- Status status = driver.parse( operators );
- if ( !status.isOK() ) {
- uasserted( 16838, status.reason() );
+ UpdateDriver driver(opts);
+ Status status = driver.parse(operators);
+ if (!status.isOK()) {
+ uasserted(16838, status.reason());
}
- mutablebson::Document doc( from, mutablebson::Document::kInPlaceDisabled );
- status = driver.update( StringData(), &doc, NULL /* not oplogging */ );
- if ( !status.isOK() ) {
- uasserted( 16839, status.reason() );
+ mutablebson::Document doc(from, mutablebson::Document::kInPlaceDisabled);
+ status = driver.update(StringData(), &doc);
+ if (!status.isOK()) {
+ uasserted(16839, status.reason());
}
return doc.getObject();
diff --git a/src/mongo/db/ops/update_driver.cpp b/src/mongo/db/ops/update_driver.cpp
index 64d2616a2f5..34a0e2163f5 100644
--- a/src/mongo/db/ops/update_driver.cpp
+++ b/src/mongo/db/ops/update_driver.cpp
@@ -30,6 +30,7 @@
#include "mongo/base/error_codes.h"
#include "mongo/base/string_data.h"
+#include "mongo/bson/mutable/algorithm.h"
#include "mongo/bson/mutable/document.h"
#include "mongo/db/field_ref.h"
#include "mongo/db/ops/log_builder.h"
@@ -42,12 +43,15 @@
namespace mongo {
namespace str = mongoutils::str;
+ namespace mb = mongo::mutablebson;
UpdateDriver::UpdateDriver(const Options& opts)
- : _multi(opts.multi)
+ : _replacementMode(false)
+ , _multi(opts.multi)
, _upsert(opts.upsert)
, _logOp(opts.logOp)
- , _modOptions(opts.modOptions) {
+ , _modOptions(opts.modOptions)
+ , _affectIndices(false) {
}
UpdateDriver::~UpdateDriver() {
@@ -117,23 +121,10 @@ namespace mongo {
while (innerIter.more()) {
BSONElement innerModElem = innerIter.next();
- if (innerModElem.eoo()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << outerModElem.fieldName()
- << "." << innerModElem.fieldName()
- << " has no value: " << innerModElem
- << " which is not allowed for any $<mod>.");
- }
-
- auto_ptr<ModifierInterface> mod(modifiertable::makeUpdateMod(modType));
- dassert(mod.get());
-
- Status status = mod->init(innerModElem, _modOptions);
+ Status status = addAndParse(modType, innerModElem);
if (!status.isOK()) {
return status;
}
-
- _mods.push_back(mod.release());
}
}
@@ -144,71 +135,114 @@ namespace mongo {
return Status::OK();
}
- Status UpdateDriver::createFromQuery(const BSONObj& query, mutablebson::Document& doc) {
- BSONObjIteratorSorted i(query);
- while (i.more()) {
- BSONElement e = i.next();
- // TODO: get this logic/exclude-list from the query system?
- if (e.fieldName()[0] == '$' || e.fieldNameStringData() == "_id")
- continue;
+ inline Status UpdateDriver::addAndParse(const modifiertable::ModifierType type,
+ const BSONElement& elem) {
+ if (elem.eoo()) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "'" << elem.fieldName()
+ << "' has no value in : " << elem
+ << " which is not allowed for any $" << type << " mod.");
+ }
+ auto_ptr<ModifierInterface> mod(modifiertable::makeUpdateMod(type));
+ dassert(mod.get());
- if (e.type() == Object && e.embeddedObject().firstElementFieldName()[0] == '$') {
- // we have something like { x : { $gt : 5 } }
- // this can be a query piece
- // or can be a dbref or something
+ Status status = mod->init(elem, _modOptions);
+ if (!status.isOK()) {
+ return status;
+ }
- int op = e.embeddedObject().firstElement().getGtLtOp();
- if (op > 0) {
- // This means this is a $gt type filter, so don't make it part of the new
- // object.
- continue;
- }
+ _mods.push_back(mod.release());
- if (mongoutils::str::equals(e.embeddedObject().firstElement().fieldName(),
- "$not")) {
- // A $not filter operator is not detected in getGtLtOp() and should not
- // become part of the new object.
- continue;
- }
+ return Status::OK();
+ }
+
+ Status UpdateDriver::populateDocumentWithQueryFields(const BSONObj& query,
+ mutablebson::Document& doc) const {
+ if (isDocReplacement()) {
+ BSONElement idElem = query.getField("_id");
+ // Replacement mods need the _id field copied explicitly.
+ if (idElem.ok()) {
+ mb::Element elem = doc.makeElement(idElem);
+ return doc.root().pushFront(elem);
}
+ }
+ else {
+ // Create a new UpdateDriver to create the base doc from the query
+ Options opts;
+ opts.logOp = false;
+ opts.multi = false;
+ opts.upsert = true;
+ opts.modOptions = modOptions();
+
+ UpdateDriver insertDriver(opts);
+ insertDriver.setContext(ModifierInterface::ExecInfo::INSERT_CONTEXT);
+
+ // parse query $set mods, removing non-equality stuff
+ BSONObjIteratorSorted i(query);
+ while (i.more()) {
+ BSONElement e = i.next();
+ // TODO: get this logic/exclude-list from the query system?
+ if (e.fieldName()[0] == '$')
+ continue;
- // Add to the field to doc after expanding and checking for conflicts.
- FieldRef elemName;
- const StringData& elemNameSD(e.fieldNameStringData());
- elemName.parse(elemNameSD);
- size_t pos;
- mutablebson::Element* elemFound = NULL;
+ if (e.type() == Object && e.embeddedObject().firstElementFieldName()[0] == '$') {
+ // we have something like { x : { $gt : 5 } }
+ // this can be a query piece
+ // or can be a dbref or something
- Status status = pathsupport::findLongestPrefix(elemName, doc.root(), &pos, elemFound);
- // Not NonExistentPath, of OK, return
- if (!(status.code() == ErrorCodes::NonExistentPath || status.isOK()))
- return status;
+ int op = e.embeddedObject().firstElement().getGtLtOp();
+ if (op > 0) {
+ // This means this is a $gt type filter, so don't make it part of the new
+ // object.
+ continue;
+ }
- status = pathsupport::createPathAt(elemName,
- 0,
- doc.root(),
- doc.makeElementWithNewFieldName(
- elemName.getPart(elemName.numParts()-1),
- e));
- if (!status.isOK())
- return status;
+ if (mongoutils::str::equals(e.embeddedObject().firstElement().fieldName(),
+ "$not")) {
+ // A $not filter operator is not detected in getGtLtOp() and should not
+ // become part of the new object.
+ continue;
+ }
+ }
+
+ // Add this element as a $set modifier
+ Status s = insertDriver.addAndParse(modifiertable::MOD_SET, e);
+ if (!s.isOK())
+ return s;
+ }
+
+ // update the document with base field
+ Status s = insertDriver.update(StringData(), &doc);
+ if (!s.isOK()) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ str::stream() << "Cannot create base during"
+ " insert of update. Caused by :"
+ << s.toString());
+ }
}
return Status::OK();
}
Status UpdateDriver::update(const StringData& matchedField,
mutablebson::Document* doc,
- BSONObj* logOpRec) {
+ BSONObj* logOpRec,
+ FieldRefSet* updatedFields) {
// TODO: assert that update() is called at most once in a !_multi case.
- FieldRefSet targetFields;
+ // Use the passed in FieldRefSet
+ FieldRefSet* targetFields = updatedFields;
- _affectIndices = false;
+ // If we didn't get a FieldRefSet* from the caller, allocate storage and use
+ // the scoped_ptr for lifecycle management
+ scoped_ptr<FieldRefSet> targetFieldScopedPtr;
+ if (!targetFields) {
+ targetFieldScopedPtr.reset(new FieldRefSet());
+ targetFields = targetFieldScopedPtr.get();
+ }
- if (_shardKeyState)
- _shardKeyState->affectedKeySet.clear();
+ _affectIndices = false;
_logDoc.reset();
LogBuilder logBuilder(_logDoc.root());
@@ -242,21 +276,15 @@ namespace mongo {
break;
}
- if (!execInfo.noOp && _shardKeyState)
- _shardKeyState->keySet.getConflicts(
- execInfo.fieldRef[i],
- &_shardKeyState->affectedKeySet);
-
- if (!targetFields.empty() || _mods.size() > 1) {
- const FieldRef* other;
- if (!targetFields.insert(execInfo.fieldRef[i], &other)) {
- return Status(ErrorCodes::ConflictingUpdateOperators,
- str::stream() << "Cannot update '"
- << other->dottedField()
- << "' and '"
- << execInfo.fieldRef[i]->dottedField()
- << "' at the same time");
- }
+ // Record each field being updated but check for conflicts first
+ const FieldRef* other;
+ if (!targetFields->insert(execInfo.fieldRef[i], &other)) {
+ return Status(ErrorCodes::ConflictingUpdateOperators,
+ str::stream() << "Cannot update '"
+ << other->dottedField()
+ << "' and '"
+ << execInfo.fieldRef[i]->dottedField()
+ << "' at the same time");
}
// We start with the expectation that a mod will be in-place. But if the mod
@@ -312,152 +340,6 @@ namespace mongo {
_indexedFields = indexedFields;
}
- void UpdateDriver::refreshShardKeyPattern(const BSONObj& shardKeyPattern) {
-
- // An empty pattern object means no shard keys.
- if (shardKeyPattern.isEmpty()) {
- _shardKeyState.reset();
- return;
- }
-
- // If we have already parsed an identical shard key pattern, don't do it again.
- if (_shardKeyState && (_shardKeyState->pattern.woCompare(shardKeyPattern) == 0))
- return;
-
- // Reset the shard key state and capture the new pattern.
- _shardKeyState.reset(new ShardKeyState);
- _shardKeyState->pattern = shardKeyPattern;
-
- // Parse the shard keys into the states 'keys' and 'keySet' members.
- BSONObjIterator patternIter = _shardKeyState->pattern.begin();
- while (patternIter.more()) {
- BSONElement current = patternIter.next();
-
- _shardKeyState->keys.mutableVector().push_back(new FieldRef);
- FieldRef* const newFieldRef = _shardKeyState->keys.mutableVector().back();
- newFieldRef->parse(current.fieldNameStringData());
-
- // TODO: what about bad parse?
-
- const FieldRef* conflict;
- if ( !_shardKeyState->keySet.insert( newFieldRef, &conflict ) ) {
- // This seems pretty unlikely in practice.
- uasserted(
- 17152, str::stream()
- << "Shard key '"
- << newFieldRef->dottedField()
- << "' conflicts with shard key '"
- << conflict->dottedField()
- << "'" );
- }
- }
- }
-
- bool UpdateDriver::modsAffectShardKeys() const {
- // If we have no shard key state, the mods could not have affected them.
- if (!_shardKeyState)
- return false;
-
- // In replacement mode, we always assume that all shard keys need to be checked. For
- // upsert, we are inserting a new object, so it must have all shard keys. Otherwise, it
- // depends on whether any shard keys were added to the affectedKeySet state.
- return (_replacementMode || !_shardKeyState->affectedKeySet.empty());
- }
-
- Status UpdateDriver::checkShardKeysUnaltered(const BSONObj& original,
- const mutablebson::Document& updated) const {
-
- if (!_shardKeyState)
- return Status::OK();
-
- // In replacement mode, we validate the values for all shard keys. Otherwise, only the
- // ones tagged as being potentially invalidated. For an upsert, we check all keys.
- const FieldRefSet& affected = (_replacementMode || original.isEmpty()) ?
- _shardKeyState->keySet :
- _shardKeyState->affectedKeySet;
-
- const mutablebson::ConstElement id = updated.root()["_id"];
-
- FieldRefSet::const_iterator where = affected.begin();
- const FieldRefSet::const_iterator end = affected.end();
- for( ; where != end; ++where ) {
- const FieldRef& current = **where;
-
- // Find the affected field in the updated document.
- mutablebson::ConstElement elt = updated.root();
- size_t currentPart = 0;
- while (elt.ok() && currentPart < current.numParts())
- elt = elt[current.getPart(currentPart++)];
-
- if (!elt.ok()) {
- if (original.isEmpty()) {
- return Status(ErrorCodes::NoSuchKey,
- mongoutils::str::stream()
- << "After applying modifiers of replacement in an upsert"
- << "', the shard key field '" << current.dottedField() <<
- "' was not found in the resulting document.");
-
- }
- else {
- return Status(ErrorCodes::ImmutableShardKeyField,
- mongoutils::str::stream()
- << "After applying updates to the object with _id '"
- << id.toString()
- << "', the shard key field '" << current.dottedField() <<
- "' was found to have been removed from the document");
- }
- }
-
- // For upserts, we don't have an original object to compare against. The existence
- // check above must be sufficient for now. Potentially, we could do keyBelongsTo me
- // here.
- //
- // TODO: Investigate calling keyBelongsToMe here. We'd need to do this if we want
- // mongod to be a full backstop for incorrect updates in a forward compatible way,
- // looking at the day where we open up the floodgates for updates through mongod.
- if (!original.isEmpty()) {
-
- // Find the potentially affected field in the original document.
- const BSONElement foundInOld = original.getFieldDotted(current.dottedField());
- if (foundInOld.eoo()) {
-
- // NOTE: These errors should really never occur. It would mean that the base
- // document on disk was missing the shard key, so we have no way to validate
- // that it wasn't changed.
-
- BSONElement id;
- if (original.getObjectID(id)) {
- return Status(ErrorCodes::NoSuchKey,
- mongoutils::str::stream()
- << "While updating object with _id '" << id << "', the field"
- << " for shard key '"
- << current.dottedField() << "' was not found "
- << "in the original document");
- }
- else {
- return Status(ErrorCodes::NoSuchKey,
- mongoutils::str::stream()
- << "While updating an object with no '_id' field, the field"
- << " for shard key '"
- << current.dottedField() << "' was not found "
- << "in the original document");
- }
- }
-
- if (elt.compareWithBSONElement(foundInOld, false) != 0) {
- return Status(ErrorCodes::ImmutableShardKeyField,
- mongoutils::str::stream()
- << "After applying updates to the object with _id '" <<
- id.toString()
- << "', the shard key field '" << current.dottedField() <<
- "' was found to have been altered");
- }
- }
- }
-
- return Status::OK();
- }
-
bool UpdateDriver::multi() const {
return _multi;
}
@@ -498,7 +380,7 @@ namespace mongo {
_context = context;
}
- BSONObj UpdateDriver::makeOplogEntryQuery(const BSONObj doc, bool multi) const {
+ BSONObj UpdateDriver::makeOplogEntryQuery(const BSONObj& doc, bool multi) const {
BSONObjBuilder idPattern;
BSONElement id;
// NOTE: If the matching object lacks an id, we'll log
@@ -523,7 +405,6 @@ namespace mongo {
}
_indexedFields.clear();
_replacementMode = false;
- _shardKeyState.reset();
}
} // namespace mongo
diff --git a/src/mongo/db/ops/update_driver.h b/src/mongo/db/ops/update_driver.h
index 972647b7580..ea5744d2279 100644
--- a/src/mongo/db/ops/update_driver.h
+++ b/src/mongo/db/ops/update_driver.h
@@ -38,6 +38,7 @@
#include "mongo/db/index_set.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/ops/modifier_interface.h"
+#include "mongo/db/ops/modifier_table.h"
namespace mongo {
@@ -66,13 +67,13 @@ namespace mongo {
* Returns Status::OK() if the document can be used. If there are any error or
* conflicts along the way then those errors will be returned.
*/
- static Status createFromQuery(const BSONObj& query, mutablebson::Document& doc);
+ Status populateDocumentWithQueryFields(const BSONObj& query, mutablebson::Document& doc) const;
/**
* return a BSONObj with the _id field of the doc passed in, or the doc itself.
* If no _id and multi, error.
*/
- BSONObj makeOplogEntryQuery(const BSONObj doc, bool multi) const;
+ BSONObj makeOplogEntryQuery(const BSONObj& doc, bool multi) const;
/**
* Returns OK and executes '_mods' over 'doc', generating 'newObj'. If any mod is
@@ -83,10 +84,14 @@ namespace mongo {
* If the driver's '_logOp' mode is turned on, and if 'logOpRec' is not NULL, fills in
* the latter with the oplog entry corresponding to the update. If '_mods' can't be
* applied, returns an error status with a corresponding description.
+ *
+ * If a non-NULL 'updatedFields' FieldRefSet pointer is supplied,
+ * then all updated field paths will be added to it.
*/
Status update(const StringData& matchedField,
mutablebson::Document* doc,
- BSONObj* logOpRec);
+ BSONObj* logOpRec = NULL,
+ FieldRefSet* updatedFields = NULL);
//
// Accessors
@@ -99,30 +104,6 @@ namespace mongo {
bool modsAffectIndices() const;
void refreshIndexKeys(const IndexPathSet& indexedFields);
- /** Inform the update driver of which fields are shard keys so that attempts to modify
- * those fields can be rejected by the driver. Pass an empty object to indicate that
- * no shard keys are in play.
- */
- void refreshShardKeyPattern(const BSONObj& shardKeyPattern);
-
- /** After calling 'update' above, this will return true if it appears that the modifier
- * updates may have altered any shard keys. If this returns 'true',
- * 'verifyShardKeysUnaltered' should be called with the original unmutated object so
- * field comparisons can be made and illegal mutations detected.
- */
- bool modsAffectShardKeys() const;
-
- /** If the mods were detected to have potentially affected shard keys during a
- * non-upsert udpate, call this method, providing the original unaltered document so
- * that the apparently altered fields can be verified to have not actually changed. A
- * non-OK status indicates that at least one mutation to a shard key was detected, and
- * the update should be rejected rather than applied. You may pass an empty original
- * object on an upsert, since there is not an original object against which to
- * compare. In that case, only the existence of shard keys in 'updated' is verified.
- */
- Status checkShardKeysUnaltered(const BSONObj& original,
- const mutablebson::Document& updated) const;
-
bool multi() const;
void setMulti(bool multi);
@@ -143,6 +124,10 @@ namespace mongo {
/** Resets the state of the class associated with mods (not the error state) */
void clear();
+ /** Create the modifier and add it to the back of the modifiers vector */
+ inline Status addAndParse(const modifiertable::ModifierType type,
+ const BSONElement& elem);
+
//
// immutable properties after parsing
//
@@ -179,25 +164,6 @@ namespace mongo {
// at each call to update.
bool _affectIndices;
- // Holds the fields relevant to any optional shard key state.
- struct ShardKeyState {
- // The current shard key pattern
- BSONObj pattern;
-
- // A vector owning the FieldRefs parsed from the pattern field names.
- OwnedPointerVector<FieldRef> keys;
-
- // A FieldRefSet containing pointers to the FieldRefs in 'keys'.
- FieldRefSet keySet;
-
- // The current set of keys known to be affected by the current update. This is
- // reset on each call to 'update'.
- FieldRefSet affectedKeySet;
- };
-
- // If shard keys have been set, holds the relevant state.
- boost::scoped_ptr<ShardKeyState> _shardKeyState;
-
// Is this update going to be an upsert?
ModifierInterface::ExecInfo::UpdateContext _context;
diff --git a/src/mongo/db/ops/update_driver_test.cpp b/src/mongo/db/ops/update_driver_test.cpp
index cdc9baadd44..3a13e14269f 100644
--- a/src/mongo/db/ops/update_driver_test.cpp
+++ b/src/mongo/db/ops/update_driver_test.cpp
@@ -28,7 +28,9 @@
#include "mongo/db/ops/update_driver.h"
-#include "mongo/db/field_ref_set.h"
+#include "mongo/base/string_data.h"
+#include "mongo/bson/mutable/document.h"
+#include "mongo/bson/mutable/mutable_bson_test_utils.h"
#include "mongo/db/index_set.h"
#include "mongo/db/json.h"
#include "mongo/unittest/unittest.h"
@@ -36,8 +38,6 @@
namespace {
using mongo::BSONObj;
- using mongo::FieldRef;
- using mongo::FieldRefSet;
using mongo::fromjson;
using mongo::IndexPathSet;
using mongo::mutablebson::Document;
@@ -115,180 +115,43 @@ namespace {
ASSERT_FALSE(driver.isDocReplacement());
}
- // A base class for shard key immutability tests. We construct a document (see 'setUp'
- // below for the document structure), and declare the two subfields "s.a' and 's.b' to be
- // the shard keys, then test that various mutations that affect (or don't) the shard keys
- // are rejected (or permitted).
- class ShardKeyTest : public mongo::unittest::Test {
- public:
- ShardKeyTest()
- : _shardKeyPattern(fromjson("{ 's.a' : 1, 's.c' : 1 }")) {
- }
- void setUp() {
- // All elements here are arrays so that we can perform a no-op that won't be
- // detected as such by the update code, which would foil our testing. Instead, we
- // use $push with $slice.
- _obj.reset(new BSONObj(
- fromjson("{ x : [1], s : { a : [1], b : [2], c : [ 3, 3, 3 ] } }")));
- _doc.reset(new Document(*_obj));
- _driver.reset(new UpdateDriver(UpdateDriver::Options()));
- }
-
- protected:
- BSONObj _shardKeyPattern;
- boost::scoped_ptr<BSONObj> _obj;
- boost::scoped_ptr<Document> _doc;
- boost::scoped_ptr<UpdateDriver> _driver;
- };
-
- TEST_F(ShardKeyTest, NoOpsDoNotAffectShardKeys) {
- BSONObj mod(fromjson("{ $set : { 's.a.0' : 1, 's.c.0' : 3 } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
- ASSERT_FALSE(_driver->modsAffectShardKeys());
- }
-
- TEST_F(ShardKeyTest, MutatingShardKeyFieldRejected) {
- BSONObj mod(fromjson("{ $push : { 's.a' : { $each : [2], $slice : -1 } } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
-
- ASSERT_TRUE(_driver->modsAffectShardKeys());
-
- // Should be rejected, we are changing the value of a shard key.
- ASSERT_NOT_OK(_driver->checkShardKeysUnaltered(*_obj, *_doc));
- }
-
- TEST_F(ShardKeyTest, MutatingShardKeyFieldRejectedObjectReplace) {
- BSONObj mod(fromjson("{ x : [1], s : { a : [2], b : [2], c : [ 3, 3, 3 ] } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
-
- ASSERT_TRUE(_driver->modsAffectShardKeys());
-
- // Should be rejected, we are changing the value of a shard key.
- ASSERT_NOT_OK(_driver->checkShardKeysUnaltered(*_obj, *_doc));
- }
-
- TEST_F(ShardKeyTest, SettingShardKeyFieldToSameValueIsNotRejected) {
- BSONObj mod(fromjson("{ $push : { 's.a' : { $each : [1], $slice : -1 } } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
-
- // It is a no-op, so we don't see it as affecting.
- ASSERT_TRUE(_driver->modsAffectShardKeys());
-
- // Should not be rejected: 's.a' has the same value as it did originally.
- ASSERT_OK(_driver->checkShardKeysUnaltered(*_obj, *_doc));
- }
-
- TEST_F(ShardKeyTest, UnsettingShardKeyFieldRejected) {
- BSONObj mod(fromjson("{ $unset : { 's.a' : 1 } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
-
- ASSERT_TRUE(_driver->modsAffectShardKeys());
-
- // Should be rejected, we are removing one of the shard key fields
- ASSERT_NOT_OK(_driver->checkShardKeysUnaltered(*_obj, *_doc));
- }
-
- TEST_F(ShardKeyTest, SettingShardKeyChildrenRejected) {
- BSONObj mod(fromjson("{ $set : { 's.c.0' : 0 } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
-
- ASSERT_TRUE(_driver->modsAffectShardKeys());
-
- // Should be rejected, we are setting a value subsumed under one of the shard keys.
- ASSERT_NOT_OK(_driver->checkShardKeysUnaltered(*_obj, *_doc));
- }
-
- TEST_F(ShardKeyTest, UnsettingShardKeyChildrenRejected) {
- BSONObj mod(fromjson("{ $unset : { 's.c.0' : 1 } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
-
- ASSERT_TRUE(_driver->modsAffectShardKeys());
-
- // Should be rejected, we are removing one of the shard key fields
- ASSERT_NOT_OK(_driver->checkShardKeysUnaltered(*_obj, *_doc));
- }
-
- TEST_F(ShardKeyTest, SettingShardKeyChildrenToSameValueIsNotRejected) {
- BSONObj mod(fromjson("{ $push : { 's.c' : { $each : [3], $slice : -3 } } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
-
- ASSERT_TRUE(_driver->modsAffectShardKeys());
-
- // Should not be rejected, we are setting a value subsumed under one of the shard keys,
- // but the set is a logical no-op.
- ASSERT_OK(_driver->checkShardKeysUnaltered(*_obj, *_doc));
- }
-
- TEST_F(ShardKeyTest, AppendingToShardKeyChildrenRejected) {
- BSONObj mod(fromjson("{ $push : { 's.c' : 4 } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
-
- ASSERT_TRUE(_driver->modsAffectShardKeys());
-
- // Should be rejected, we are adding a new child under one of the shard keys.
- ASSERT_NOT_OK(_driver->checkShardKeysUnaltered(*_obj, *_doc));
- }
-
- TEST_F(ShardKeyTest, ModificationsToUnrelatedFieldsAreOK) {
- BSONObj mod(fromjson("{ $set : { x : 2, 's.b' : 'x' } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
+ // Test the upsert case where we copy the query parts into the new doc
+ TEST(CreateFromQuery, Basic) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ Document doc;
- // Should not claim to have affected shard keys
- ASSERT_FALSE(_driver->modsAffectShardKeys());
+ BSONObj query = fromjson("{a:1, b:1}");
+ ASSERT_OK(driver.populateDocumentWithQueryFields(query, doc));
+ ASSERT_EQUALS(query, doc);
}
- TEST_F(ShardKeyTest, RemovingUnrelatedFieldsIsOK) {
- BSONObj mod(fromjson("{ $unset : { x : 1, 's.b' : 1 } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
+ TEST(CreateFromQuery, BasicWithId) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ Document doc;
- // Should not claim to have affected shard keys
- ASSERT_FALSE(_driver->modsAffectShardKeys());
+ BSONObj query = fromjson("{_id:1, a:1, b:1}");
+ ASSERT_OK(driver.populateDocumentWithQueryFields(query, doc));
+ ASSERT_EQUALS(query, doc);
}
- TEST_F(ShardKeyTest, AddingUnrelatedFieldsIsOK) {
- BSONObj mod(fromjson("{ $set : { z : 1 } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
+ TEST(CreateFromQuery, NestedSharedRoot) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ Document doc;
- // Should not claim to have affected shard keys
- ASSERT_FALSE(_driver->modsAffectShardKeys());
+ ASSERT_OK(driver.populateDocumentWithQueryFields(fromjson("{'a.c':1, 'a.b':1}"), doc));
}
- TEST_F(ShardKeyTest, OverwriteShardKeyFieldWithSameValueIsNotAnErrorObjectReplace) {
- BSONObj mod(fromjson("{ x : [1], s : { a : [1], b : [2], c : [ 3, 3, 3 ] } }"));
- ASSERT_OK(_driver->parse(mod));
- _driver->refreshShardKeyPattern(_shardKeyPattern);
- ASSERT_OK(_driver->update(StringData(), _doc.get(), NULL));
- ASSERT_TRUE(_driver->modsAffectShardKeys());
+ // Failures
+ TEST(CreateFromQuery, DupFieldsFail) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ Document doc;
- // Applying the above mod should be OK, since we didn't actually change any of the
- // shard key values.
- ASSERT_OK(_driver->checkShardKeysUnaltered(*_obj, *_doc));
+ ASSERT_NOT_OK(driver.populateDocumentWithQueryFields(fromjson("{a:1, 'a.b':1}"), doc));
}
-
} // unnamed namespace
diff --git a/src/mongo/db/ops/update_lifecycle.h b/src/mongo/db/ops/update_lifecycle.h
new file mode 100644
index 00000000000..4fd226999d3
--- /dev/null
+++ b/src/mongo/db/ops/update_lifecycle.h
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2013 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/field_ref.h"
+#include "mongo/db/structure/collection.h"
+#include "mongo/s/chunk_version.h"
+
+namespace mongo {
+
+    class UpdateLifecycle {
+    public:
+
+        virtual ~UpdateLifecycle() {}
+
+        /**
+         * Can the update continue?
+         *
+         * The (only) implementation will check the following:
+         *  1.) The collection still exists.
+         *  2.) The shard version has not changed (a change would indicate that the
+         *      query/update is no longer valid).
+         *
+         * NOTE(review): the 'const' on these by-value return types has no effect;
+         * dropping it would require a matching change in UpdateLifecycleImpl.
+         */
+        virtual const bool canContinue() const = 0;
+
+        /**
+         * Sets the out parameter to the collection's index keys, but only if there
+         * is a collection and it has indexes; otherwise the out parameter is left
+         * untouched.
+         */
+        virtual const void getIndexKeys(IndexPathSet* returnedIndexPathSet) const = 0;
+
+        /**
+         * Returns the shard keys as immutable fields (NULL when there are none).
+         * Immutable fields in this case mean that they are required to exist,
+         * cannot change values, and must not be multi-valued (must not be in an
+         * array, nor be an array).
+         */
+        virtual const std::vector<FieldRef*>* getImmutableFields() const = 0;
+    };
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/update_lifecycle_impl.cpp b/src/mongo/db/ops/update_lifecycle_impl.cpp
new file mode 100644
index 00000000000..a2e53fe9cee
--- /dev/null
+++ b/src/mongo/db/ops/update_lifecycle_impl.cpp
@@ -0,0 +1,85 @@
+/**
+ * Copyright (C) 2013 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/ops/update_lifecycle_impl.h"
+
+#include "mongo/db/client.h"
+#include "mongo/db/database.h"
+#include "mongo/db/field_ref.h"
+#include "mongo/db/structure/collection.h"
+#include "mongo/s/chunk_version.h"
+#include "mongo/s/d_logic.h"
+
+namespace mongo {
+ namespace {
+        // Returns the sharding metadata for 'nsString', or an empty pointer when
+        // the collection is not sharded on this shard.
+        CollectionMetadataPtr getMetadata(const NamespaceString& nsString) {
+            if (!shardingState.needCollectionMetadata(nsString.ns()))
+                return CollectionMetadataPtr();
+            return shardingState.getCollectionMetadata(nsString.ns());
+        }
+ }
+
+    UpdateLifecycleImpl::UpdateLifecycleImpl(bool ignoreVersion, const NamespaceString& nsStr)
+        : _nsString(nsStr)
+        , _shardVersion(ChunkVersion::IGNORED()) {
+        // Capture the shard version once at construction; canContinue() compares
+        // against it later. Fetch the metadata a single time -- the previous code
+        // invoked getMetadata() twice inside the initializer list.
+        if (!ignoreVersion) {
+            const CollectionMetadataPtr metadata = getMetadata(_nsString);
+            if (metadata)
+                _shardVersion = metadata->getShardVersion();
+        }
+    }
+
+    const bool UpdateLifecycleImpl::canContinue() const {
+        // The collection must still exist for the update to proceed.
+        Collection* collection = cc().database()->getCollection(_nsString.ns());
+        if (!collection)
+            return false;
+
+        // The current shard version must be write compatible with the version
+        // captured at construction time (or be an ignorable version).
+        const CollectionMetadataPtr metadata = getMetadata(_nsString);
+        ChunkVersion currentVersion = ChunkVersion::IGNORED();
+        if (metadata)
+            currentVersion = metadata->getShardVersion();
+
+        if (ChunkVersion::isIgnoredVersion(currentVersion))
+            return true;
+        return _shardVersion.isWriteCompatibleWith(currentVersion);
+    }
+
+    const void UpdateLifecycleImpl::getIndexKeys(IndexPathSet* returnedIndexPathSet) const {
+        // Leave the out parameter untouched when the collection no longer exists.
+        Collection* collection = cc().database()->getCollection(_nsString.ns());
+        if (!collection)
+            return;
+        *returnedIndexPathSet = collection->infoCache()->indexKeys();
+    }
+
+    const std::vector<FieldRef*>* UpdateLifecycleImpl::getImmutableFields() const {
+        const CollectionMetadataPtr metadata = getMetadata(_nsString);
+        if (!metadata)
+            return NULL;
+
+        // Report the shard keys as immutable for the update system.
+        // NOTE(review): the returned vector is owned by the metadata object; callers
+        // rely on shardingState keeping that metadata alive -- confirm.
+        return &metadata->getKeyPatternFields();
+    }
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/update_lifecycle_impl.h b/src/mongo/db/ops/update_lifecycle_impl.h
new file mode 100644
index 00000000000..d82c8b24336
--- /dev/null
+++ b/src/mongo/db/ops/update_lifecycle_impl.h
@@ -0,0 +1,60 @@
+/**
+ * Copyright (C) 2013 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/disallow_copying.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/ops/update_lifecycle.h"
+#include "mongo/db/structure/collection.h"
+
+namespace mongo {
+
+    class UpdateLifecycleImpl : public UpdateLifecycle {
+        MONGO_DISALLOW_COPYING(UpdateLifecycleImpl);
+
+    public:
+
+        /**
+         * 'ignoreVersion' controls shard version checking: when true, canContinue()
+         * performs no version checks.
+         *
+         * 'nsString' is the namespace of the collection the update runs against.
+         */
+        UpdateLifecycleImpl(bool ignoreVersion, const NamespaceString& nsString);
+
+        virtual const bool canContinue() const;
+        virtual const void getIndexKeys(IndexPathSet* returnedIndexPathSet) const;
+        virtual const std::vector<FieldRef*>* getImmutableFields() const;
+
+    private:
+        // Stored by value: the previous reference member could dangle if the
+        // caller's NamespaceString did not outlive this object.
+        const NamespaceString _nsString;
+
+        // Shard version captured at construction time; compared in canContinue().
+        ChunkVersion _shardVersion;
+    };
+
+} /* namespace mongo */
diff --git a/src/mongo/db/ops/update_request.h b/src/mongo/db/ops/update_request.h
index ce590b8ba62..36e1b20bde1 100644
--- a/src/mongo/db/ops/update_request.h
+++ b/src/mongo/db/ops/update_request.h
@@ -38,6 +38,9 @@ namespace mongo {
namespace str = mongoutils::str;
+ class FieldRef;
+ class UpdateLifecycle;
+
class UpdateRequest {
public:
inline UpdateRequest(
@@ -48,9 +51,10 @@ namespace mongo {
, _god(false)
, _upsert(false)
, _multi(false)
- , _updateOpLog(false)
+ , _callLogOp(false)
, _fromMigration(false)
- , _fromReplication(false) {}
+ , _fromReplication(false)
+ , _lifecycle(NULL) {}
const NamespaceString& getNamespaceString() const {
return _nsString;
@@ -104,11 +108,11 @@ namespace mongo {
}
inline void setUpdateOpLog(bool value = true) {
- _updateOpLog = value;
+ _callLogOp = value;
}
- bool shouldUpdateOpLog() const {
- return _updateOpLog;
+ bool shouldCallLogOp() const {
+ return _callLogOp;
}
inline void setFromMigration(bool value = true) {
@@ -127,6 +131,14 @@ namespace mongo {
return _fromReplication;
}
+ inline void setLifecycle(const UpdateLifecycle* value) {
+ _lifecycle = value;
+ }
+
+ inline const UpdateLifecycle* getLifecycle() const {
+ return _lifecycle;
+ }
+
const std::string toString() const {
return str::stream()
<< " query: " << _query
@@ -134,7 +146,7 @@ namespace mongo {
<< " god: " << _god
<< " upsert: " << _upsert
<< " multi: " << _multi
- << " logToOplog: " << _updateOpLog
+ << " callLogOp: " << _callLogOp
<< " fromMigration: " << _fromMigration
<< " fromReplications: " << _fromReplication;
}
@@ -162,13 +174,16 @@ namespace mongo {
bool _multi;
// True if the effects of the update should be written to the oplog.
- bool _updateOpLog;
+ bool _callLogOp;
// True if this update is on behalf of a chunk migration.
bool _fromMigration;
// True if this update is being applied during the application for the oplog.
bool _fromReplication;
+
+ // The lifecycle data, and events used during the update request.
+ const UpdateLifecycle* _lifecycle;
};
} // namespace mongo
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index f88a1867ab8..2560b0944f3 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/ops/update.h"
+#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/replication_server_status.h"
@@ -521,6 +522,8 @@ namespace mongo {
request.setUpdates(o);
request.setUpsert();
request.setFromReplication();
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
update(request, &debug);
@@ -548,6 +551,8 @@ namespace mongo {
request.setUpdates(o);
request.setUpsert();
request.setFromReplication();
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
update(request, &debug);
}
@@ -573,6 +578,8 @@ namespace mongo {
request.setUpdates(o);
request.setUpsert(upsert);
request.setFromReplication();
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
UpdateResult ur = update(request, &debug);
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 6ab9f987ea9..f96bb3c5ab7 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/cloner.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/ops/update_request.h"
+#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/rs.h"
@@ -567,6 +568,8 @@ namespace mongo {
request.setUpdates(i->second);
request.setGod();
request.setUpsert();
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
update(request, &debug);
diff --git a/src/mongo/s/collection_metadata.cpp b/src/mongo/s/collection_metadata.cpp
index 9a0c66ba663..f49bc911c40 100644
--- a/src/mongo/s/collection_metadata.cpp
+++ b/src/mongo/s/collection_metadata.cpp
@@ -96,6 +96,7 @@ namespace mongo {
auto_ptr<CollectionMetadata> metadata( new CollectionMetadata );
metadata->_keyPattern = this->_keyPattern;
metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
metadata->_pendingMap = this->_pendingMap;
metadata->_chunksMap = this->_chunksMap;
metadata->_chunksMap.erase( chunk.getMin() );
@@ -146,6 +147,7 @@ namespace mongo {
auto_ptr<CollectionMetadata> metadata( new CollectionMetadata );
metadata->_keyPattern = this->_keyPattern;
metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
metadata->_pendingMap = this->_pendingMap;
metadata->_chunksMap = this->_chunksMap;
metadata->_chunksMap.insert( make_pair( chunk.getMin().getOwned(),
@@ -189,6 +191,7 @@ namespace mongo {
auto_ptr<CollectionMetadata> metadata( new CollectionMetadata );
metadata->_keyPattern = this->_keyPattern;
metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
metadata->_pendingMap = this->_pendingMap;
metadata->_pendingMap.erase( pending.getMin() );
metadata->_chunksMap = this->_chunksMap;
@@ -224,6 +227,7 @@ namespace mongo {
auto_ptr<CollectionMetadata> metadata( new CollectionMetadata );
metadata->_keyPattern = this->_keyPattern;
metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
metadata->_pendingMap = this->_pendingMap;
metadata->_chunksMap = this->_chunksMap;
metadata->_rangesMap = this->_rangesMap;
@@ -324,6 +328,7 @@ namespace mongo {
auto_ptr<CollectionMetadata> metadata(new CollectionMetadata);
metadata->_keyPattern = this->_keyPattern;
metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
metadata->_pendingMap = this->_pendingMap;
metadata->_chunksMap = this->_chunksMap;
metadata->_shardVersion = newShardVersion; // will increment 2nd, 3rd,... chunks below
@@ -411,6 +416,7 @@ namespace mongo {
auto_ptr<CollectionMetadata> metadata( new CollectionMetadata );
metadata->_keyPattern = this->_keyPattern;
metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
metadata->_pendingMap = this->_pendingMap;
metadata->_chunksMap = this->_chunksMap;
metadata->_rangesMap = this->_rangesMap;
@@ -705,4 +711,17 @@ namespace mongo {
_rangesMap.insert(make_pair(min, max));
}
+    void CollectionMetadata::fillKeyPatternFields() {
+        // Parse the shard-key pattern's field names into the owned '_keyFields'
+        // vector of FieldRefs (exposed via getKeyPatternFields()).
+        BSONObjIterator patternIter = _keyPattern.begin();
+        while (patternIter.more()) {
+            BSONElement current = patternIter.next();
+
+            _keyFields.mutableVector().push_back(new FieldRef);
+            FieldRef* const newFieldRef = _keyFields.mutableVector().back();
+            newFieldRef->parse(current.fieldNameStringData());
+        }
+    }
+
+
} // namespace mongo
diff --git a/src/mongo/s/collection_metadata.h b/src/mongo/s/collection_metadata.h
index 99ef01b6d3c..8f4588b789b 100644
--- a/src/mongo/s/collection_metadata.h
+++ b/src/mongo/s/collection_metadata.h
@@ -29,6 +29,8 @@
#pragma once
#include "mongo/base/disallow_copying.h"
+#include "mongo/base/owned_pointer_vector.h"
+#include "mongo/db/field_ref_set.h"
#include "mongo/db/jsobj.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/range_arithmetic.h"
@@ -189,6 +191,10 @@ namespace mongo {
return _keyPattern;
}
+ const std::vector<FieldRef*>& getKeyPatternFields() const {
+ return _keyFields.vector();
+ }
+
BSONObj getMinKey() const;
BSONObj getMaxKey() const;
@@ -271,6 +277,9 @@ namespace mongo {
// key pattern for chunks under this range
BSONObj _keyPattern;
+ // A vector owning the FieldRefs parsed from the shard-key pattern of field names.
+ OwnedPointerVector<FieldRef> _keyFields;
+
//
// RangeMaps represent chunks by mapping the min key to the chunk's max key, allowing
// efficient lookup and intersection.
@@ -297,6 +306,10 @@ namespace mongo {
*/
void fillRanges();
+ /**
+ * Creates the _keyField* local data
+ */
+ void fillKeyPatternFields();
};
} // namespace mongo
diff --git a/src/mongo/s/metadata_loader.cpp b/src/mongo/s/metadata_loader.cpp
index 941514b1f09..c463bd4a13b 100644
--- a/src/mongo/s/metadata_loader.cpp
+++ b/src/mongo/s/metadata_loader.cpp
@@ -160,6 +160,7 @@ namespace mongo {
// Sharded collection, need to load chunks
metadata->_keyPattern = collInfo.getKeyPattern();
+ metadata->fillKeyPatternFields();
metadata->_shardVersion = ChunkVersion( 0, 0, collInfo.getEpoch() );
metadata->_collVersion = ChunkVersion( 0, 0, collInfo.getEpoch() );
@@ -173,6 +174,7 @@ namespace mongo {
dassert( collInfo.getPrimary() != "" );
metadata->_keyPattern = BSONObj();
+ metadata->fillKeyPatternFields();
metadata->_shardVersion = ChunkVersion( 1, 0, collInfo.getEpoch() );
metadata->_collVersion = metadata->_shardVersion;
diff --git a/src/mongo/shell/assert.js b/src/mongo/shell/assert.js
index 944ac4b087d..f5f0b18ef78 100644
--- a/src/mongo/shell/assert.js
+++ b/src/mongo/shell/assert.js
@@ -308,3 +308,41 @@ assert.close = function(a, b, msg, places){
doassert(a + " is not equal to " + b + " within " + places +
" places, diff: " + (a-b) + " : " + msg);
};
+
+assert.gleSuccess = function(db, msg) {
+ var gle = db.getLastErrorObj();
+ if (gle.err) {
+ if (typeof(msg) == "function")
+ msg = msg(gle);
+ doassert("getLastError not null:" + tojson(gle) + " :" + msg);
+ }
+}
+
+assert.gleError = function(db, msg) {
+ var gle = db.getLastErrorObj();
+ if (!gle.err) {
+ if (typeof(msg) == "function")
+ msg = msg(gle);
+ doassert("getLastError is null: " + tojson(gle) + " :" + msg);
+ }
+}
+
+assert.gleErrorCode = function(db, code, msg) {
+ var gle = db.getLastErrorObj();
+ if (gle.err && (gle.code == code)) {
+ if (typeof(msg) == "function")
+ msg = msg(gle);
+ doassert("getLastError not null or missing code( " + code + "): "
+ + tojson(gle) + " :" + msg);
+ }
+}
+
+assert.gleErrorRegex = function(db, regex, msg) {
+ var gle = db.getLastErrorObj();
+ if (!gle.err || !regex.test(gle.err)) {
+ if (typeof(msg) == "function")
+ msg = msg(gle);
+ doassert("getLastError is null or doesn't match regex (" + regex + "): "
+ + tojson(gle) + " :" + msg);
+ }
+}