// Test replication of an array by $push-ing to a missing field in the presence of a sparse index on
// the field. SERVER-4907

function testWithCollectionIndexIds( capped, sparse, useIds ) {
    printjson( { capped:capped, sparse:sparse, useIds:useIds } );

    rt = new ReplTest( "repl14tests" );

    m = rt.start( true ); // master
    if ( capped ) {
        m.getDB( 'd' ).createCollection( 'c', { capped:true, size:5*1024 } );
    }
    mc = m.getDB( 'd' )[ 'c' ]; // master collection
    mc.ensureIndex( {a:1}, {sparse:sparse} );

    toInsert = {};
    if ( capped ) {
        // Add a singleton array as padding, so the push later on will not change document size.
        toInsert = {p:[1]};
    }
    if ( useIds ) { // Insert with an auto generated _id.
        mc.insert( toInsert );
    }
    else { // Otherwise avoid the auto generated _id.
        mc._mongo.insert( mc._fullName, toInsert, 0 );
    }
    assert.eq( mc.count(), 1 );

    s = rt.start( false ); // slave
    sc = s.getDB( 'd' )[ 'c' ]; // slave collection

    // Wait for the document to be cloned.
    assert.soon( function() { return sc.count() > 0; }, "doc not replicated soon enough", 60*1000 );

    modifiers = {$push:{a:1}};
    if ( capped ) {
        // Delete our singleton array to balance the new singleton array we're going to create.
        modifiers['$unset'] = {p:1};
    }
    assert.writeOK( mc.update( {}, modifiers ) );

    // Wait for the update to be replicated.
    assert.soon( function() { return sc.count( {a:1} ) > 0; } );

    rt.stop();
}

function testWithCollectionIndex( capped, sparse ) {
    testWithCollectionIndexIds( capped, sparse, true );
    if ( capped ) {
        testWithCollectionIndexIds( capped, sparse, false );
    }
}

function testWithCollection( capped ) {
    testWithCollectionIndex( capped, true );
    testWithCollectionIndex( capped, false );
}

function test() {
    testWithCollection( true );
    testWithCollection( false );
}
test();