summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJudah Schvimer <judah@mongodb.com>2015-12-16 15:41:49 -0500
committerJudah Schvimer <judah@mongodb.com>2015-12-16 15:41:49 -0500
commite3463ed7d542e2144c681585bdd7b08c591717b9 (patch)
tree1069a0b557fa07db35c20b2bcbe2e9190c23c5a2
parentc4c115b56a7d55547ff04e17baedc11a53a02fc0 (diff)
downloadmongo-e3463ed7d542e2144c681585bdd7b08c591717b9.tar.gz
TOOLS-954 Upgraded mgo to v2
-rw-r--r--Godeps2
-rw-r--r--vendor/src/gopkg.in/mgo.v2/bson/bson.go21
-rw-r--r--vendor/src/gopkg.in/mgo.v2/bson/bson_test.go94
-rw-r--r--vendor/src/gopkg.in/mgo.v2/bson/decode.go25
-rw-r--r--vendor/src/gopkg.in/mgo.v2/bson/encode.go8
-rwxr-xr-xvendor/src/gopkg.in/mgo.v2/bson/specdata/update.sh27
-rw-r--r--vendor/src/gopkg.in/mgo.v2/bson/specdata_test.go241
-rw-r--r--vendor/src/gopkg.in/mgo.v2/bulk.go251
-rw-r--r--vendor/src/gopkg.in/mgo.v2/bulk_test.go264
-rw-r--r--vendor/src/gopkg.in/mgo.v2/cluster.go44
-rw-r--r--vendor/src/gopkg.in/mgo.v2/cluster_test.go169
-rw-r--r--vendor/src/gopkg.in/mgo.v2/doc.go2
-rw-r--r--vendor/src/gopkg.in/mgo.v2/gridfs.go2
-rw-r--r--vendor/src/gopkg.in/mgo.v2/server.go7
-rw-r--r--vendor/src/gopkg.in/mgo.v2/session.go427
-rw-r--r--vendor/src/gopkg.in/mgo.v2/session_test.go338
-rw-r--r--vendor/src/gopkg.in/mgo.v2/socket.go71
-rw-r--r--vendor/src/gopkg.in/mgo.v2/suite_test.go17
-rw-r--r--vendor/src/gopkg.in/mgo.v2/testdb/client.pem93
-rw-r--r--vendor/src/gopkg.in/mgo.v2/testdb/dropall.js2
-rw-r--r--vendor/src/gopkg.in/mgo.v2/testdb/init.js28
-rw-r--r--vendor/src/gopkg.in/mgo.v2/testdb/server.pem79
-rwxr-xr-xvendor/src/gopkg.in/mgo.v2/testdb/setup.sh2
-rw-r--r--vendor/src/gopkg.in/mgo.v2/testdb/supervisord.conf5
-rw-r--r--vendor/src/gopkg.in/mgo.v2/testdb/wait.js11
25 files changed, 1838 insertions, 392 deletions
diff --git a/Godeps b/Godeps
index 3857afa01e3..18fde779e1e 100644
--- a/Godeps
+++ b/Godeps
@@ -1,4 +1,4 @@
-gopkg.in/mgo.v2 c36a379eb352352dc9bf0833598db244a47e9d1a github.com/10gen/mgo
+gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49
gopkg.in/tomb.v2 14b3d72120e8d10ea6e6b7f87f7175734b1faab8 github.com/go-tomb/tomb
github.com/jacobsa/oglematchers 3ecefc49db07722beca986d9bb71ddd026b133f0
github.com/smartystreets/goconvey eb2e83c1df892d2c9ad5a3c85672da30be585dfd
diff --git a/vendor/src/gopkg.in/mgo.v2/bson/bson.go b/vendor/src/gopkg.in/mgo.v2/bson/bson.go
index 41816b874b2..f1f9ab747f7 100644
--- a/vendor/src/gopkg.in/mgo.v2/bson/bson.go
+++ b/vendor/src/gopkg.in/mgo.v2/bson/bson.go
@@ -189,15 +189,25 @@ func IsObjectIdHex(s string) bool {
// objectIdCounter is atomically incremented when generating a new ObjectId
// using NewObjectId() function. It's used as a counter part of an id.
-var objectIdCounter uint32 = 0
+var objectIdCounter uint32 = readRandomUint32()
+
+// readRandomUint32 returns a random objectIdCounter.
+func readRandomUint32() uint32 {
+ var b [4]byte
+ _, err := io.ReadFull(rand.Reader, b[:])
+ if err != nil {
+ panic(fmt.Errorf("cannot read random object id: %v", err))
+ }
+ return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
+}
+
// machineId stores machine id generated once and used in subsequent calls
// to NewObjectId function.
var machineId = readMachineId()
-// readMachineId generates machine id and puts it into the machineId global
-// variable. If this function fails to get the hostname, it will cause
-// a runtime error.
+// readMachineId generates and returns a machine id.
+// If this function fails to get the hostname it will cause a runtime error.
func readMachineId() []byte {
var sum [3]byte
id := sum[:]
@@ -421,7 +431,8 @@ func handleErr(err *error) {
}
// Marshal serializes the in value, which may be a map or a struct value.
-// In the case of struct values, only exported fields will be serialized.
+// In the case of struct values, only exported fields will be serialized,
+// and the order of serialized fields will match that of the struct itself.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
diff --git a/vendor/src/gopkg.in/mgo.v2/bson/bson_test.go b/vendor/src/gopkg.in/mgo.v2/bson/bson_test.go
index eb2e9f41e14..beabbb65ef8 100644
--- a/vendor/src/gopkg.in/mgo.v2/bson/bson_test.go
+++ b/vendor/src/gopkg.in/mgo.v2/bson/bson_test.go
@@ -29,15 +29,18 @@ package bson_test
import (
"encoding/binary"
+ "encoding/hex"
"encoding/json"
"errors"
"net/url"
"reflect"
+ "strings"
"testing"
"time"
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2/bson"
+ "gopkg.in/yaml.v2"
)
func TestAll(t *testing.T) {
@@ -582,8 +585,12 @@ var marshalErrorItems = []testItemType{
"Can't marshal complex128 in a BSON document"},
{&structWithDupKeys{},
"Duplicated key 'name' in struct bson_test.structWithDupKeys"},
- {bson.Raw{0x0A, []byte{}},
- "Attempted to unmarshal Raw kind 10 as a document"},
+ {bson.Raw{0xA, []byte{}},
+ "Attempted to marshal Raw kind 10 as a document"},
+ {bson.Raw{0x3, []byte{}},
+ "Attempted to marshal empty Raw document"},
+ {bson.M{"w": bson.Raw{0x3, []byte{}}},
+ "Attempted to marshal empty Raw document"},
{&inlineCantPtr{&struct{ A, B int }{1, 2}},
"Option ,inline needs a struct value or map field"},
{&inlineDupName{1, struct{ A, B int }{2, 3}},
@@ -635,6 +642,10 @@ var unmarshalErrorItems = []unmarshalErrorType{
{123,
"\x10name\x00\x08\x00\x00\x00",
"Unmarshal needs a map or a pointer to a struct."},
+
+ {nil,
+ "\x08\x62\x00\x02",
+ "encoded boolean must be 1 or 0, found 2"},
}
func (s *S) TestUnmarshalErrorItems(c *C) {
@@ -687,7 +698,7 @@ func (s *S) TestUnmarshalRawErrorItems(c *C) {
}
var corruptedData = []string{
- "\x04\x00\x00\x00\x00", // Shorter than minimum
+ "\x04\x00\x00\x00\x00", // Document shorter than minimum
"\x06\x00\x00\x00\x00", // Not enough data
"\x05\x00\x00", // Broken length
"\x05\x00\x00\x00\xff", // Corrupted termination
@@ -704,6 +715,15 @@ var corruptedData = []string{
// String with corrupted end.
wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"),
+
+ // String with negative length (issue #116).
+ "\x0c\x00\x00\x00\x02x\x00\xff\xff\xff\xff\x00",
+
+ // String with zero length (must include trailing '\x00')
+ "\x0c\x00\x00\x00\x02x\x00\x00\x00\x00\x00\x00",
+
+ // Binary with negative length.
+ "\r\x00\x00\x00\x05x\x00\xff\xff\xff\xff\x00\x00",
}
func (s *S) TestUnmarshalMapDocumentTooShort(c *C) {
@@ -979,6 +999,9 @@ type condTime struct {
type condStruct struct {
V struct{ A []int } ",omitempty"
}
+type condRaw struct {
+ V bson.Raw ",omitempty"
+}
type shortInt struct {
V int64 ",minsize"
@@ -1232,6 +1255,9 @@ var twoWayCrossItems = []crossTypeItem{
{&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}},
{&condStruct{struct{ A []int }{}}, bson.M{}},
+ {&condRaw{bson.Raw{Kind: 0x0A, Data: []byte{}}}, bson.M{"v": nil}},
+ {&condRaw{bson.Raw{Kind: 0x00}}, bson.M{}},
+
{&namedCondStr{"yo"}, map[string]string{"myv": "yo"}},
{&namedCondStr{}, map[string]string{}},
@@ -1254,6 +1280,9 @@ var twoWayCrossItems = []crossTypeItem{
{&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}},
{&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}},
+ // []byte <=> Binary
+ {&struct{ B []byte }{[]byte("abc")}, map[string]bson.Binary{"b": bson.Binary{Data: []byte("abc")}}},
+
// []byte <=> MyBytes
{&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}},
{&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}},
@@ -1324,6 +1353,9 @@ var oneWayCrossItems = []crossTypeItem{
{&struct {
V struct{ v time.Time } ",omitempty"
}{}, map[string]interface{}{}},
+
+ // Attempt to marshal slice into RawD (issue #120).
+ {bson.M{"x": []int{1, 2, 3}}, &struct{ X bson.RawD }{}},
}
func testCrossPair(c *C, dump interface{}, load interface{}) {
@@ -1535,6 +1567,62 @@ func (s *S) TestObjectIdJSONMarshaling(c *C) {
}
}
+type specTest struct {
+ Description string
+ Documents []struct {
+ Decoded map[string]interface{}
+ Encoded string
+ DecodeOnly bool `yaml:"decodeOnly"`
+ Error interface{}
+ }
+}
+
+func (s *S) TestSpecTests(c *C) {
+ for _, data := range specTests {
+ var test specTest
+ err := yaml.Unmarshal([]byte(data), &test)
+ c.Assert(err, IsNil)
+
+ c.Logf("Running spec test set %q", test.Description)
+
+ for _, doc := range test.Documents {
+ if doc.Error != nil {
+ continue
+ }
+ c.Logf("Ensuring %q decodes as %v", doc.Encoded, doc.Decoded)
+ var decoded map[string]interface{}
+ encoded, err := hex.DecodeString(doc.Encoded)
+ c.Assert(err, IsNil)
+ err = bson.Unmarshal(encoded, &decoded)
+ c.Assert(err, IsNil)
+ c.Assert(decoded, DeepEquals, doc.Decoded)
+ }
+
+ for _, doc := range test.Documents {
+ if doc.DecodeOnly || doc.Error != nil {
+ continue
+ }
+ c.Logf("Ensuring %v encodes as %q", doc.Decoded, doc.Encoded)
+ encoded, err := bson.Marshal(doc.Decoded)
+ c.Assert(err, IsNil)
+ c.Assert(strings.ToUpper(hex.EncodeToString(encoded)), Equals, doc.Encoded)
+ }
+
+ for _, doc := range test.Documents {
+ if doc.Error == nil {
+ continue
+ }
+ c.Logf("Ensuring %q errors when decoded: %s", doc.Encoded, doc.Error)
+ var decoded map[string]interface{}
+ encoded, err := hex.DecodeString(doc.Encoded)
+ c.Assert(err, IsNil)
+ err = bson.Unmarshal(encoded, &decoded)
+ c.Assert(err, NotNil)
+ c.Logf("Failed with: %v", err)
+ }
+ }
+}
+
// --------------------------------------------------------------------------
// Some simple benchmarks.
diff --git a/vendor/src/gopkg.in/mgo.v2/bson/decode.go b/vendor/src/gopkg.in/mgo.v2/bson/decode.go
index bdd2e028737..0ee8d22d9ad 100644
--- a/vendor/src/gopkg.in/mgo.v2/bson/decode.go
+++ b/vendor/src/gopkg.in/mgo.v2/bson/decode.go
@@ -325,6 +325,10 @@ func (d *decoder) readArrayDocTo(out reflect.Value) {
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
tmp := make([]reflect.Value, 0, 8)
elemType := t.Elem()
+ if elemType == typeRawDocElem {
+ d.dropElem(0x04)
+ return reflect.Zero(t).Interface()
+ }
end := int(d.readInt32())
end += d.i - 4
@@ -437,7 +441,7 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
start := d.i
- if kind == '\x03' {
+ if kind == 0x03 {
// Delegate unmarshaling of documents.
outt := out.Type()
outk := out.Kind()
@@ -723,6 +727,12 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
out.Set(reflect.ValueOf(u).Elem())
return true
}
+ if outt == typeBinary {
+ if b, ok := in.([]byte); ok {
+ out.Set(reflect.ValueOf(Binary{Data: b}))
+ return true
+ }
+ }
}
return false
@@ -776,10 +786,14 @@ func (d *decoder) readCStr() string {
}
func (d *decoder) readBool() bool {
- if d.readByte() == 1 {
+ b := d.readByte()
+ if b == 0 {
+ return false
+ }
+ if b == 1 {
return true
}
- return false
+ panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
}
func (d *decoder) readFloat64() float64 {
@@ -816,9 +830,12 @@ func (d *decoder) readByte() byte {
}
func (d *decoder) readBytes(length int32) []byte {
+ if length < 0 {
+ corrupted()
+ }
start := d.i
d.i += int(length)
- if d.i > len(d.in) {
+ if d.i < start || d.i > len(d.in) {
corrupted()
}
return d.in[start : start+int(length)]
diff --git a/vendor/src/gopkg.in/mgo.v2/bson/encode.go b/vendor/src/gopkg.in/mgo.v2/bson/encode.go
index e1015091b66..36eb29ce671 100644
--- a/vendor/src/gopkg.in/mgo.v2/bson/encode.go
+++ b/vendor/src/gopkg.in/mgo.v2/bson/encode.go
@@ -101,7 +101,10 @@ func (e *encoder) addDoc(v reflect.Value) {
if v.Type() == typeRaw {
raw := v.Interface().(Raw)
if raw.Kind != 0x03 && raw.Kind != 0x00 {
- panic("Attempted to unmarshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+ panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+ }
+ if len(raw.Data) == 0 {
+ panic("Attempted to marshal empty Raw document")
}
e.addBytes(raw.Data...)
return
@@ -389,6 +392,9 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
if kind == 0x00 {
kind = 0x03
}
+ if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
+ panic("Attempted to marshal empty Raw document")
+ }
e.addElemName(kind, name)
e.addBytes(s.Data...)
diff --git a/vendor/src/gopkg.in/mgo.v2/bson/specdata/update.sh b/vendor/src/gopkg.in/mgo.v2/bson/specdata/update.sh
new file mode 100755
index 00000000000..1efd3d3b66d
--- /dev/null
+++ b/vendor/src/gopkg.in/mgo.v2/bson/specdata/update.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+if [ ! -d specifications ]; then
+ git clone -b bson git@github.com:jyemin/specifications
+fi
+
+TESTFILE="../specdata_test.go"
+
+cat <<END > $TESTFILE
+package bson_test
+
+var specTests = []string{
+END
+
+for file in specifications/source/bson/tests/*.yml; do
+ (
+ echo '`'
+ cat $file
+ echo -n '`,'
+ ) >> $TESTFILE
+done
+
+echo '}' >> $TESTFILE
+
+gofmt -w $TESTFILE
diff --git a/vendor/src/gopkg.in/mgo.v2/bson/specdata_test.go b/vendor/src/gopkg.in/mgo.v2/bson/specdata_test.go
new file mode 100644
index 00000000000..513f9b209c7
--- /dev/null
+++ b/vendor/src/gopkg.in/mgo.v2/bson/specdata_test.go
@@ -0,0 +1,241 @@
+package bson_test
+
+var specTests = []string{
+ `
+---
+description: "Array type"
+documents:
+ -
+ decoded:
+ a : []
+ encoded: 0D000000046100050000000000
+ -
+ decoded:
+ a: [10]
+ encoded: 140000000461000C0000001030000A0000000000
+ -
+ # Decode an array that uses an empty string as the key
+ decodeOnly : true
+ decoded:
+ a: [10]
+ encoded: 130000000461000B00000010000A0000000000
+ -
+ # Decode an array that uses a non-numeric string as the key
+ decodeOnly : true
+ decoded:
+ a: [10]
+ encoded: 150000000461000D000000106162000A0000000000
+
+
+`, `
+---
+description: "Boolean type"
+documents:
+ -
+ encoded: "090000000862000100"
+ decoded: { "b" : true }
+ -
+ encoded: "090000000862000000"
+ decoded: { "b" : false }
+
+
+ `, `
+---
+description: "Corrupted BSON"
+documents:
+ -
+ encoded: "09000000016600"
+ error: "truncated double"
+ -
+ encoded: "09000000026600"
+ error: "truncated string"
+ -
+ encoded: "09000000036600"
+ error: "truncated document"
+ -
+ encoded: "09000000046600"
+ error: "truncated array"
+ -
+ encoded: "09000000056600"
+ error: "truncated binary"
+ -
+ encoded: "09000000076600"
+ error: "truncated objectid"
+ -
+ encoded: "09000000086600"
+ error: "truncated boolean"
+ -
+ encoded: "09000000096600"
+ error: "truncated date"
+ -
+ encoded: "090000000b6600"
+ error: "truncated regex"
+ -
+ encoded: "090000000c6600"
+ error: "truncated db pointer"
+ -
+ encoded: "0C0000000d6600"
+ error: "truncated javascript"
+ -
+ encoded: "0C0000000e6600"
+ error: "truncated symbol"
+ -
+ encoded: "0C0000000f6600"
+ error: "truncated javascript with scope"
+ -
+ encoded: "0C000000106600"
+ error: "truncated int32"
+ -
+ encoded: "0C000000116600"
+ error: "truncated timestamp"
+ -
+ encoded: "0C000000126600"
+ error: "truncated int64"
+ -
+ encoded: "0400000000"
+ error: basic
+ -
+ encoded: "0500000001"
+ error: basic
+ -
+ encoded: "05000000"
+ error: basic
+ -
+ encoded: "0700000002610078563412"
+ error: basic
+ -
+ encoded: "090000001061000500"
+ error: basic
+ -
+ encoded: "00000000000000000000"
+ error: basic
+ -
+ encoded: "1300000002666f6f00040000006261720000"
+ error: "basic"
+ -
+ encoded: "1800000003666f6f000f0000001062617200ffffff7f0000"
+ error: basic
+ -
+ encoded: "1500000003666f6f000c0000000862617200010000"
+ error: basic
+ -
+ encoded: "1c00000003666f6f001200000002626172000500000062617a000000"
+ error: basic
+ -
+ encoded: "1000000002610004000000616263ff00"
+ error: string is not null-terminated
+ -
+ encoded: "0c0000000200000000000000"
+ error: bad_string_length
+ -
+ encoded: "120000000200ffffffff666f6f6261720000"
+ error: bad_string_length
+ -
+ encoded: "0c0000000e00000000000000"
+ error: bad_string_length
+ -
+ encoded: "120000000e00ffffffff666f6f6261720000"
+ error: bad_string_length
+ -
+ encoded: "180000000c00fa5bd841d6585d9900"
+ error: ""
+ -
+ encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900"
+ error: bad_string_length
+ -
+ encoded: "0c0000000d00000000000000"
+ error: bad_string_length
+ -
+ encoded: "0c0000000d00ffffffff0000"
+ error: bad_string_length
+ -
+ encoded: "1c0000000f001500000000000000000c000000020001000000000000"
+ error: bad_string_length
+ -
+ encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000"
+ error: bad_string_length
+ -
+ encoded: "1c0000000f001500000001000000000c000000020000000000000000"
+ error: bad_string_length
+ -
+ encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000"
+ error: bad_string_length
+ -
+ encoded: "0E00000008616263646566676869707172737475"
+ error: "Run-on CString"
+ -
+ encoded: "0100000000"
+ error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)"
+ -
+ encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000"
+ error: "One object, but with object size listed smaller than it is in the data"
+ -
+ encoded: "05000000"
+ error: "One object, missing the EOO at the end"
+ -
+ encoded: "0500000001"
+ error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01"
+ -
+ encoded: "05000000ff"
+ error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff"
+ -
+ encoded: "0500000070"
+ error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70"
+ -
+ encoded: "07000000000000"
+ error: "Invalid BSON type low range"
+ -
+ encoded: "07000000800000"
+ error: "Invalid BSON type high range"
+ -
+ encoded: "090000000862000200"
+ error: "Invalid boolean value of 2"
+ -
+ encoded: "09000000086200ff00"
+ error: "Invalid boolean value of -1"
+ `, `
+---
+description: "Int32 type"
+documents:
+ -
+ decoded:
+ i: -2147483648
+ encoded: 0C0000001069000000008000
+ -
+ decoded:
+ i: 2147483647
+ encoded: 0C000000106900FFFFFF7F00
+ -
+ decoded:
+ i: -1
+ encoded: 0C000000106900FFFFFFFF00
+ -
+ decoded:
+ i: 0
+ encoded: 0C0000001069000000000000
+ -
+ decoded:
+ i: 1
+ encoded: 0C0000001069000100000000
+
+`, `
+---
+description: "String type"
+documents:
+ -
+ decoded:
+ s : ""
+ encoded: 0D000000027300010000000000
+ -
+ decoded:
+ s: "a"
+ encoded: 0E00000002730002000000610000
+ -
+ decoded:
+ s: "This is a string"
+ encoded: 1D0000000273001100000054686973206973206120737472696E670000
+ -
+ decoded:
+ s: "κόσμε"
+ encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000
+`}
diff --git a/vendor/src/gopkg.in/mgo.v2/bulk.go b/vendor/src/gopkg.in/mgo.v2/bulk.go
index 23f4508535c..c377af56352 100644
--- a/vendor/src/gopkg.in/mgo.v2/bulk.go
+++ b/vendor/src/gopkg.in/mgo.v2/bulk.go
@@ -1,10 +1,14 @@
package mgo
+import (
+ "bytes"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
// Bulk represents an operation that can be prepared with several
// orthogonal changes before being delivered to the server.
//
-// WARNING: This API is still experimental.
-//
// Relevant documentation:
//
// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
@@ -12,19 +16,39 @@ package mgo
type Bulk struct {
c *Collection
ordered bool
- inserts []interface{}
+ actions []bulkAction
+}
+
+type bulkOp int
+
+const (
+ bulkInsert bulkOp = iota + 1
+ bulkUpdate
+ bulkUpdateAll
+ bulkRemove
+)
+
+type bulkAction struct {
+ op bulkOp
+ docs []interface{}
}
+type bulkUpdateOp []interface{}
+type bulkDeleteOp []interface{}
+
// BulkError holds an error returned from running a Bulk operation.
//
// TODO: This is private for the moment, until we understand exactly how
// to report these multi-errors in a useful and convenient way.
type bulkError struct {
- err error
+ errs []error
}
// BulkResult holds the results for a bulk operation.
type BulkResult struct {
+ Matched int
+ Modified int // Available only for MongoDB 2.6+
+
// Be conservative while we understand exactly how to report these
// results in a useful and convenient way, and also how to emulate
// them with prior servers.
@@ -32,13 +56,35 @@ type BulkResult struct {
}
func (e *bulkError) Error() string {
- return e.err.Error()
+ if len(e.errs) == 0 {
+ return "invalid bulkError instance: no errors"
+ }
+ if len(e.errs) == 1 {
+ return e.errs[0].Error()
+ }
+ msgs := make([]string, 0, len(e.errs))
+ seen := make(map[string]bool)
+ for _, err := range e.errs {
+ msg := err.Error()
+ if !seen[msg] {
+ seen[msg] = true
+ msgs = append(msgs, msg)
+ }
+ }
+ if len(msgs) == 1 {
+ return msgs[0]
+ }
+ var buf bytes.Buffer
+ buf.WriteString("multiple errors in bulk operation:\n")
+ for _, msg := range msgs {
+ buf.WriteString(" - ")
+ buf.WriteString(msg)
+ buf.WriteByte('\n')
+ }
+ return buf.String()
}
// Bulk returns a value to prepare the execution of a bulk operation.
-//
-// WARNING: This API is still experimental.
-//
func (c *Collection) Bulk() *Bulk {
return &Bulk{c: c, ordered: true}
}
@@ -52,20 +98,197 @@ func (b *Bulk) Unordered() {
b.ordered = false
}
+func (b *Bulk) action(op bulkOp) *bulkAction {
+ if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op {
+ return &b.actions[len(b.actions)-1]
+ }
+ if !b.ordered {
+ for i := range b.actions {
+ if b.actions[i].op == op {
+ return &b.actions[i]
+ }
+ }
+ }
+ b.actions = append(b.actions, bulkAction{op: op})
+ return &b.actions[len(b.actions)-1]
+}
+
// Insert queues up the provided documents for insertion.
func (b *Bulk) Insert(docs ...interface{}) {
- b.inserts = append(b.inserts, docs...)
+ action := b.action(bulkInsert)
+ action.docs = append(action.docs, docs...)
+}
+
+// Remove queues up the provided selectors for removing matching documents.
+// Each selector will remove only a single matching document.
+func (b *Bulk) Remove(selectors ...interface{}) {
+ action := b.action(bulkRemove)
+ for _, selector := range selectors {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &deleteOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Flags: 1,
+ Limit: 1,
+ })
+ }
+}
+
+// RemoveAll queues up the provided selectors for removing all matching documents.
+// Each selector will remove all matching documents.
+func (b *Bulk) RemoveAll(selectors ...interface{}) {
+ action := b.action(bulkRemove)
+ for _, selector := range selectors {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &deleteOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Flags: 0,
+ Limit: 0,
+ })
+ }
+}
+
+// Update queues up the provided pairs of updating instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update it.
+// Each pair matches exactly one document for updating at most.
+func (b *Bulk) Update(pairs ...interface{}) {
+ if len(pairs)%2 != 0 {
+ panic("Bulk.Update requires an even number of parameters")
+ }
+ action := b.action(bulkUpdate)
+ for i := 0; i < len(pairs); i += 2 {
+ selector := pairs[i]
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &updateOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Update: pairs[i+1],
+ })
+ }
+}
+
+// UpdateAll queues up the provided pairs of updating instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update it.
+// Each pair updates all documents matching the selector.
+func (b *Bulk) UpdateAll(pairs ...interface{}) {
+ if len(pairs)%2 != 0 {
+ panic("Bulk.UpdateAll requires an even number of parameters")
+ }
+ action := b.action(bulkUpdate)
+ for i := 0; i < len(pairs); i += 2 {
+ selector := pairs[i]
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &updateOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Update: pairs[i+1],
+ Flags: 2,
+ Multi: true,
+ })
+ }
+}
+
+// Upsert queues up the provided pairs of upserting instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update it.
+// Each pair matches exactly one document for updating at most.
+func (b *Bulk) Upsert(pairs ...interface{}) {
+ if len(pairs)%2 != 0 {
+ panic("Bulk.Update requires an even number of parameters")
+ }
+ action := b.action(bulkUpdate)
+ for i := 0; i < len(pairs); i += 2 {
+ selector := pairs[i]
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &updateOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Update: pairs[i+1],
+ Flags: 1,
+ Upsert: true,
+ })
+ }
}
// Run runs all the operations queued up.
+//
+// If an error is reported on an unordered bulk operation, the error value may
+// be an aggregation of all issues observed. As an exception to that, Insert
+// operations running on MongoDB versions prior to 2.6 will report the last
+// error only due to a limitation in the wire protocol.
func (b *Bulk) Run() (*BulkResult, error) {
- op := &insertOp{b.c.FullName, b.inserts, 0}
+ var result BulkResult
+ var berr bulkError
+ var failed bool
+ for i := range b.actions {
+ action := &b.actions[i]
+ var ok bool
+ switch action.op {
+ case bulkInsert:
+ ok = b.runInsert(action, &result, &berr)
+ case bulkUpdate:
+ ok = b.runUpdate(action, &result, &berr)
+ case bulkRemove:
+ ok = b.runRemove(action, &result, &berr)
+ default:
+ panic("unknown bulk operation")
+ }
+ if !ok {
+ failed = true
+ if b.ordered {
+ break
+ }
+ }
+ }
+ if failed {
+ return nil, &berr
+ }
+ return &result, nil
+}
+
+func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError) bool {
+ op := &insertOp{b.c.FullName, action.docs, 0}
if !b.ordered {
op.flags = 1 // ContinueOnError
}
- _, err := b.c.writeQuery(op)
- if err != nil {
- return nil, &bulkError{err}
+ lerr, err := b.c.writeOp(op, b.ordered)
+ return b.checkSuccess(berr, lerr, err)
+}
+
+func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool {
+ lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered)
+ result.Matched += lerr.N
+ result.Modified += lerr.modified
+ return b.checkSuccess(berr, lerr, err)
+}
+
+func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *bulkError) bool {
+ lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered)
+ result.Matched += lerr.N
+ result.Modified += lerr.modified
+ return b.checkSuccess(berr, lerr, err)
+}
+
+func (b *Bulk) checkSuccess(berr *bulkError, lerr *LastError, err error) bool {
+ if lerr != nil && len(lerr.errors) > 0 {
+ berr.errs = append(berr.errs, lerr.errors...)
+ return false
+ } else if err != nil {
+ berr.errs = append(berr.errs, err)
+ return false
}
- return &BulkResult{}, nil
+ return true
}
diff --git a/vendor/src/gopkg.in/mgo.v2/bulk_test.go b/vendor/src/gopkg.in/mgo.v2/bulk_test.go
index d231d59d0e7..9aad81249dd 100644
--- a/vendor/src/gopkg.in/mgo.v2/bulk_test.go
+++ b/vendor/src/gopkg.in/mgo.v2/bulk_test.go
@@ -1,6 +1,6 @@
// mgo - MongoDB driver for Go
//
-// Copyright (c) 2010-2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
+// Copyright (c) 2010-2015 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
@@ -61,6 +61,7 @@ func (s *S) TestBulkInsertError(c *C) {
bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
_, err = bulk.Run()
c.Assert(err, ErrorMatches, ".*duplicate key.*")
+ c.Assert(mgo.IsDup(err), Equals, true)
type doc struct {
N int `_id`
@@ -129,3 +130,264 @@ func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) {
c.Assert(err, IsNil)
c.Assert(res.Id, Equals, 1500)
}
+
+func (s *S) TestBulkError(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ // If it's just the same string multiple times, join it into a single message.
+ bulk := coll.Bulk()
+ bulk.Unordered()
+ bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2})
+ _, err = bulk.Run()
+ c.Assert(err, ErrorMatches, ".*duplicate key.*")
+ c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key")
+ c.Assert(mgo.IsDup(err), Equals, true)
+
+ // With matching errors but different messages, present them all.
+ bulk = coll.Bulk()
+ bulk.Unordered()
+ bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"})
+ _, err = bulk.Run()
+ if s.versionAtLeast(2, 6) {
+ c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n( - .*duplicate.*\n){2}$")
+ c.Assert(err, ErrorMatches, "(?s).*dupone.*")
+ c.Assert(err, ErrorMatches, "(?s).*duptwo.*")
+ } else {
+ // Wire protocol query doesn't return all errors.
+ c.Assert(err, ErrorMatches, ".*duplicate.*")
+ }
+ c.Assert(mgo.IsDup(err), Equals, true)
+
+ // With mixed errors, present them all.
+ bulk = coll.Bulk()
+ bulk.Unordered()
+ bulk.Insert(M{"_id": 1}, M{"_id": []int{2}})
+ _, err = bulk.Run()
+ if s.versionAtLeast(2, 6) {
+ c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$")
+ } else {
+ // Wire protocol query doesn't return all errors.
+ c.Assert(err, ErrorMatches, ".*array.*")
+ }
+ c.Assert(mgo.IsDup(err), Equals, false)
+}
+
+func (s *S) TestBulkUpdate(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}})
+ bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}})
+ bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
+ bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 4)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(r.Modified, Equals, 3)
+ }
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}})
+}
+
+func (s *S) TestBulkUpdateError(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Update(
+ M{"n": 1}, M{"$set": M{"n": 10}},
+ M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
+ M{"n": 3}, M{"$set": M{"n": 30}},
+ )
+ r, err := bulk.Run()
+ c.Assert(err, ErrorMatches, ".*_id.*")
+ c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}})
+}
+
+func (s *S) TestBulkUpdateErrorUnordered(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Unordered()
+ bulk.Update(
+ M{"n": 1}, M{"$set": M{"n": 10}},
+ M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
+ M{"n": 3}, M{"$set": M{"n": 30}},
+ )
+ r, err := bulk.Run()
+ c.Assert(err, ErrorMatches, ".*_id.*")
+ c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}})
+}
+
+func (s *S) TestBulkUpdateAll(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}})
+ bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}})
+ bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
+ bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 6)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(r.Modified, Equals, 5)
+ }
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}})
+}
+
+func (s *S) TestBulkMixedUnordered(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ // Abuse undefined behavior to ensure the desired implementation is in place.
+ bulk := coll.Bulk()
+ bulk.Unordered()
+ bulk.Insert(M{"n": 1})
+ bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}})
+ bulk.Insert(M{"n": 2})
+ bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}})
+ bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}})
+ bulk.Insert(M{"n": 3})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 3)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(r.Modified, Equals, 3)
+ }
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}})
+}
+
+func (s *S) TestBulkUpsert(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}})
+ bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}})
+}
+
+func (s *S) TestBulkRemove(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Remove(M{"n": 1})
+ bulk.Remove(M{"n": 2}, M{"n": 4})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 3)
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{3}, {4}})
+}
+
+func (s *S) TestBulkRemoveAll(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.RemoveAll(M{"n": 1})
+ bulk.RemoveAll(M{"n": 2}, M{"n": 4})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 4)
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{3}})
+}
+
diff --git a/vendor/src/gopkg.in/mgo.v2/cluster.go b/vendor/src/gopkg.in/mgo.v2/cluster.go
index 3c7394ded1a..e28af5b4568 100644
--- a/vendor/src/gopkg.in/mgo.v2/cluster.go
+++ b/vendor/src/gopkg.in/mgo.v2/cluster.go
@@ -30,12 +30,12 @@ import (
"errors"
"fmt"
"net"
+ "strconv"
+ "strings"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
- "strconv"
- "strings"
)
// ---------------------------------------------------------------------------
@@ -425,14 +425,44 @@ func resolveAddr(addr string) (*net.TCPAddr, error) {
}
}
- // This unfortunate hack allows having a timeout on address resolution.
- conn, err := net.DialTimeout("udp4", addr, 10*time.Second)
- if err != nil {
+ // Attempt to resolve IPv4 and v6 concurrently.
+ addrChan := make(chan *net.TCPAddr, 2)
+ for _, network := range []string{"udp4", "udp6"} {
+ network := network
+ go func() {
+ // The unfortunate UDP dialing hack allows having a timeout on address resolution.
+ conn, err := net.DialTimeout(network, addr, 10*time.Second)
+ if err != nil {
+ addrChan <- nil
+ } else {
+ addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
+ conn.Close()
+ }
+ }()
+ }
+
+ // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available.
+ tcpaddr := <-addrChan
+ if tcpaddr == nil || len(tcpaddr.IP) != 4 {
+ var timeout <-chan time.Time
+ if tcpaddr != nil {
+ // Don't wait too long if an IPv6 address is known.
+ timeout = time.After(50 * time.Millisecond)
+ }
+ select {
+ case <-timeout:
+ case tcpaddr2 := <-addrChan:
+ if tcpaddr == nil || tcpaddr2 != nil {
+ // It's an IPv4 address or the only known address. Use it.
+ tcpaddr = tcpaddr2
+ }
+ }
+ }
+
+ if tcpaddr == nil {
log("SYNC Failed to resolve server address: ", addr)
return nil, errors.New("failed to resolve server address: " + addr)
}
- tcpaddr := (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
- conn.Close()
if tcpaddr.String() != addr {
debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
}
diff --git a/vendor/src/gopkg.in/mgo.v2/cluster_test.go b/vendor/src/gopkg.in/mgo.v2/cluster_test.go
index 53270284e5a..65cd4a40779 100644
--- a/vendor/src/gopkg.in/mgo.v2/cluster_test.go
+++ b/vendor/src/gopkg.in/mgo.v2/cluster_test.go
@@ -877,9 +877,9 @@ func (s *S) TestPreserveSocketCountOnSync(c *C) {
defer session.Close()
stats := mgo.GetStats()
- for stats.MasterConns+stats.SlaveConns != 3 {
+ for stats.SocketsAlive != 3 {
+ c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive)
stats = mgo.GetStats()
- c.Log("Waiting for all connections to be established...")
time.Sleep(5e8)
}
@@ -1240,25 +1240,26 @@ func (s *S) TestFailFast(c *C) {
c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true)
}
-type OpCounters struct {
- Insert int
- Query int
- Update int
- Delete int
- GetMore int
- Command int
-}
-
-func getOpCounters(server string) (c *OpCounters, err error) {
+func (s *S) countQueries(c *C, server string) (n int) {
+ defer func() { c.Logf("Queries for %q: %d", server, n) }()
session, err := mgo.Dial(server + "?connect=direct")
- if err != nil {
- return nil, err
- }
+ c.Assert(err, IsNil)
defer session.Close()
session.SetMode(mgo.Monotonic, true)
- result := struct{ OpCounters }{}
+ var result struct {
+ OpCounters struct {
+ Query int
+ }
+ Metrics struct {
+ Commands struct{ Find struct{ Total int } }
+ }
+ }
err = session.Run("serverStatus", &result)
- return &result.OpCounters, err
+ c.Assert(err, IsNil)
+ if s.versionAtLeast(3, 2) {
+ return result.Metrics.Commands.Find.Total
+ }
+ return result.OpCounters.Query
}
func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) {
@@ -1277,50 +1278,70 @@ func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) {
master := ssresult.Host
c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
- // Collect op counters for everyone.
- opc21a, err := getOpCounters("localhost:40021")
- c.Assert(err, IsNil)
- opc22a, err := getOpCounters("localhost:40022")
- c.Assert(err, IsNil)
- opc23a, err := getOpCounters("localhost:40023")
- c.Assert(err, IsNil)
-
- // Do a SlaveOk query through MongoS
+ // Ensure mongos is aware about the current topology.
+ s.Stop(":40201")
+ s.StartAll()
mongos, err := mgo.Dial("localhost:40202")
c.Assert(err, IsNil)
defer mongos.Close()
+ // Insert some data as otherwise 3.2+ doesn't seem to run the query at all.
+ err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1})
+ c.Assert(err, IsNil)
+
+ // Wait until all servers see the data.
+ for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} {
+ session, err := mgo.Dial(addr + "?connect=direct")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ session.SetMode(mgo.Monotonic, true)
+ for i := 300; i >= 0; i-- {
+ n, err := session.DB("mydb").C("mycoll").Find(nil).Count()
+ c.Assert(err, IsNil)
+ if n == 1 {
+ break
+ }
+ if i == 0 {
+ c.Fatalf("Inserted data never reached " + addr)
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+
+ // Collect op counters for everyone.
+ q21a := s.countQueries(c, "localhost:40021")
+ q22a := s.countQueries(c, "localhost:40022")
+ q23a := s.countQueries(c, "localhost:40023")
+
+ // Do a SlaveOk query through MongoS
+
mongos.SetMode(mgo.Monotonic, true)
coll := mongos.DB("mydb").C("mycoll")
- result := &struct{}{}
+ var result struct{ N int }
for i := 0; i != 5; i++ {
- err := coll.Find(nil).One(result)
- c.Assert(err, Equals, mgo.ErrNotFound)
+ err = coll.Find(nil).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 1)
}
// Collect op counters for everyone again.
- opc21b, err := getOpCounters("localhost:40021")
- c.Assert(err, IsNil)
- opc22b, err := getOpCounters("localhost:40022")
- c.Assert(err, IsNil)
- opc23b, err := getOpCounters("localhost:40023")
- c.Assert(err, IsNil)
-
- masterPort := master[strings.Index(master, ":")+1:]
+ q21b := s.countQueries(c, "localhost:40021")
+ q22b := s.countQueries(c, "localhost:40022")
+ q23b := s.countQueries(c, "localhost:40023")
var masterDelta, slaveDelta int
- switch masterPort {
+ switch hostPort(master) {
case "40021":
- masterDelta = opc21b.Query - opc21a.Query
- slaveDelta = (opc22b.Query - opc22a.Query) + (opc23b.Query - opc23a.Query)
+ masterDelta = q21b - q21a
+ slaveDelta = (q22b - q22a) + (q23b - q23a)
case "40022":
- masterDelta = opc22b.Query - opc22a.Query
- slaveDelta = (opc21b.Query - opc21a.Query) + (opc23b.Query - opc23a.Query)
+ masterDelta = q22b - q22a
+ slaveDelta = (q21b - q21a) + (q23b - q23a)
case "40023":
- masterDelta = opc23b.Query - opc23a.Query
- slaveDelta = (opc21b.Query - opc21a.Query) + (opc22b.Query - opc22a.Query)
+ masterDelta = q23b - q23a
+ slaveDelta = (q21b - q21a) + (q22b - q22a)
default:
c.Fatal("Uh?")
}
@@ -1361,10 +1382,23 @@ func (s *S) TestRemovalOfClusterMember(c *C) {
slaveAddr := result.Me
defer func() {
+ config := map[string]string{
+ "40021": `{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}`,
+ "40022": `{_id: 2, host: "127.0.0.1:40022", priority: 0, tags: {rs2: "b"}}`,
+ "40023": `{_id: 3, host: "127.0.0.1:40023", priority: 0, tags: {rs2: "c"}}`,
+ }
master.Refresh()
- master.Run(bson.D{{"$eval", `rs.add("` + slaveAddr + `")`}}, nil)
+ master.Run(bson.D{{"$eval", `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil)
master.Close()
slave.Close()
+
+ // Ensure suite syncs up with the changes before next test.
+ s.Stop(":40201")
+ s.StartAll()
+ time.Sleep(8 * time.Second)
+ // TODO Find a better way to find out when mongos is fully aware that all
+ // servers are up. Without that follow up tests that depend on mongos will
+ // break due to their expectation of things being in a working state.
}()
c.Logf("========== Removing slave: %s ==========", slaveAddr)
@@ -1444,12 +1478,11 @@ func (s *S) TestPoolLimitMany(c *C) {
defer session.Close()
stats := mgo.GetStats()
- for stats.MasterConns+stats.SlaveConns != 3 {
+ for stats.SocketsAlive != 3 {
+ c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive)
stats = mgo.GetStats()
- c.Log("Waiting for all connections to be established...")
- time.Sleep(500 * time.Millisecond)
+ time.Sleep(5e8)
}
- c.Assert(stats.SocketsAlive, Equals, 3)
const poolLimit = 64
session.SetPoolLimit(poolLimit)
@@ -1645,7 +1678,7 @@ func (s *S) TestPrimaryShutdownOnAuthShard(c *C) {
}
func (s *S) TestNearestSecondary(c *C) {
- defer mgo.HackPingDelay(3 * time.Second)()
+ defer mgo.HackPingDelay(300 * time.Millisecond)()
rs1a := "127.0.0.1:40011"
rs1b := "127.0.0.1:40012"
@@ -1846,12 +1879,9 @@ func (s *S) TestSelectServersWithMongos(c *C) {
}
// Collect op counters for everyone.
- opc21a, err := getOpCounters("localhost:40021")
- c.Assert(err, IsNil)
- opc22a, err := getOpCounters("localhost:40022")
- c.Assert(err, IsNil)
- opc23a, err := getOpCounters("localhost:40023")
- c.Assert(err, IsNil)
+ q21a := s.countQueries(c, "localhost:40021")
+ q22a := s.countQueries(c, "localhost:40022")
+ q23a := s.countQueries(c, "localhost:40023")
// Do a SlaveOk query through MongoS
mongos, err := mgo.Dial("localhost:40202")
@@ -1878,26 +1908,23 @@ func (s *S) TestSelectServersWithMongos(c *C) {
}
// Collect op counters for everyone again.
- opc21b, err := getOpCounters("localhost:40021")
- c.Assert(err, IsNil)
- opc22b, err := getOpCounters("localhost:40022")
- c.Assert(err, IsNil)
- opc23b, err := getOpCounters("localhost:40023")
- c.Assert(err, IsNil)
+ q21b := s.countQueries(c, "localhost:40021")
+ q22b := s.countQueries(c, "localhost:40022")
+ q23b := s.countQueries(c, "localhost:40023")
switch hostPort(master) {
case "40021":
- c.Check(opc21b.Query-opc21a.Query, Equals, 0)
- c.Check(opc22b.Query-opc22a.Query, Equals, 5)
- c.Check(opc23b.Query-opc23a.Query, Equals, 7)
+ c.Check(q21b-q21a, Equals, 0)
+ c.Check(q22b-q22a, Equals, 5)
+ c.Check(q23b-q23a, Equals, 7)
case "40022":
- c.Check(opc21b.Query-opc21a.Query, Equals, 5)
- c.Check(opc22b.Query-opc22a.Query, Equals, 0)
- c.Check(opc23b.Query-opc23a.Query, Equals, 7)
+ c.Check(q21b-q21a, Equals, 5)
+ c.Check(q22b-q22a, Equals, 0)
+ c.Check(q23b-q23a, Equals, 7)
case "40023":
- c.Check(opc21b.Query-opc21a.Query, Equals, 5)
- c.Check(opc22b.Query-opc22a.Query, Equals, 7)
- c.Check(opc23b.Query-opc23a.Query, Equals, 0)
+ c.Check(q21b-q21a, Equals, 5)
+ c.Check(q22b-q22a, Equals, 7)
+ c.Check(q23b-q23a, Equals, 0)
default:
c.Fatal("Uh?")
}
diff --git a/vendor/src/gopkg.in/mgo.v2/doc.go b/vendor/src/gopkg.in/mgo.v2/doc.go
index 9316c555457..859fd9b8df9 100644
--- a/vendor/src/gopkg.in/mgo.v2/doc.go
+++ b/vendor/src/gopkg.in/mgo.v2/doc.go
@@ -20,7 +20,7 @@
//
// New sessions are typically created by calling session.Copy on the
// initial session obtained at dial time. These new sessions will share
-// the same cluster information and connection cache, and may be easily
+// the same cluster information and connection pool, and may be easily
// handed into other methods and functions for organizing logic.
// Every session created must have its Close method called at the end
// of its life time, so its resources may be put back in the pool or
diff --git a/vendor/src/gopkg.in/mgo.v2/gridfs.go b/vendor/src/gopkg.in/mgo.v2/gridfs.go
index 54b3dd50e28..e8159a06165 100644
--- a/vendor/src/gopkg.in/mgo.v2/gridfs.go
+++ b/vendor/src/gopkg.in/mgo.v2/gridfs.go
@@ -692,7 +692,7 @@ func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
// Read reads into b the next available data from the file and
// returns the number of bytes written and an error in case
// something wrong happened. At the end of the file, n will
-// be zero and err will be set to os.EOF.
+// be zero and err will be set to io.EOF.
//
// The parameters and behavior of this function turn the file
// into an io.Reader.
diff --git a/vendor/src/gopkg.in/mgo.v2/server.go b/vendor/src/gopkg.in/mgo.v2/server.go
index d1a8e916a56..2eec1aeded0 100644
--- a/vendor/src/gopkg.in/mgo.v2/server.go
+++ b/vendor/src/gopkg.in/mgo.v2/server.go
@@ -84,9 +84,8 @@ func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) *
sync: sync,
dial: dial,
info: &defaultServerInfo,
+ pingValue: time.Hour, // Push it back before an actual ping.
}
- // Once so the server gets a ping value, then loop in background.
- server.pinger(false)
go server.pinger(true)
return server
}
@@ -274,7 +273,7 @@ NextTagSet:
return false
}
-var pingDelay = 5 * time.Second
+var pingDelay = 15 * time.Second
func (server *mongoServer) pinger(loop bool) {
var delay time.Duration
@@ -297,7 +296,7 @@ func (server *mongoServer) pinger(loop bool) {
time.Sleep(delay)
}
op := op
- socket, _, err := server.AcquireSocket(0, 3*delay)
+ socket, _, err := server.AcquireSocket(0, delay)
if err == nil {
start := time.Now()
_, _ = socket.SimpleQuery(&op)
diff --git a/vendor/src/gopkg.in/mgo.v2/session.go b/vendor/src/gopkg.in/mgo.v2/session.go
index 649af407a9d..479f1a2c210 100644
--- a/vendor/src/gopkg.in/mgo.v2/session.go
+++ b/vendor/src/gopkg.in/mgo.v2/session.go
@@ -63,25 +63,34 @@ const (
Strong Mode = 2 // Same as Primary.
)
+// mgo.v3: Drop Strong mode, suffix all modes with "Mode".
+
// When changing the Session type, check if newSession and copySession
// need to be updated too.
+// Session represents a communication session with the database.
+//
+// All Session methods are concurrency-safe and may be called from multiple
+// goroutines. In all session modes but Eventual, using the session from
+// multiple goroutines will cause them to share the same underlying socket.
+// See the documentation on Session.SetMode for more details.
type Session struct {
- m sync.RWMutex
- cluster_ *mongoCluster
- slaveSocket *mongoSocket
- masterSocket *mongoSocket
- slaveOk bool
- consistency Mode
- queryConfig query
- safeOp *queryOp
- syncTimeout time.Duration
- sockTimeout time.Duration
- defaultdb string
- sourcedb string
- dialCred *Credential
- creds []Credential
- poolLimit int
+ m sync.RWMutex
+ cluster_ *mongoCluster
+ slaveSocket *mongoSocket
+ masterSocket *mongoSocket
+ slaveOk bool
+ consistency Mode
+ queryConfig query
+ safeOp *queryOp
+ syncTimeout time.Duration
+ sockTimeout time.Duration
+ defaultdb string
+ sourcedb string
+ dialCred *Credential
+ creds []Credential
+ poolLimit int
+ bypassValidation bool
}
type Database struct {
@@ -309,7 +318,7 @@ type DialInfo struct {
// Timeout is the amount of time to wait for a server to respond when
// first connecting and on follow up operations in the session. If
// timeout is zero, the call may block forever waiting for a connection
- // to be established.
+ // to be established. Timeout does not affect logic in DialServer.
Timeout time.Duration
// FailFast will cause connection and query attempts to fail faster when
@@ -365,6 +374,8 @@ type DialInfo struct {
Dial func(addr net.Addr) (net.Conn, error)
}
+// mgo.v3: Drop DialInfo.Dial.
+
// ServerAddr represents the address for establishing a connection to an
// individual MongoDB server.
type ServerAddr struct {
@@ -974,12 +985,14 @@ type indexSpec struct {
DropDups bool "dropDups,omitempty"
Background bool ",omitempty"
Sparse bool ",omitempty"
- Bits, Min, Max int ",omitempty"
+ Bits int ",omitempty"
+ Min, Max float64 ",omitempty"
BucketSize float64 "bucketSize,omitempty"
ExpireAfter int "expireAfterSeconds,omitempty"
Weights bson.D ",omitempty"
DefaultLanguage string "default_language,omitempty"
LanguageOverride string "language_override,omitempty"
+ TextIndexVersion int "textIndexVersion,omitempty"
}
type Index struct {
@@ -993,13 +1006,21 @@ type Index struct {
// documents with indexed time.Time older than the provided delta.
ExpireAfter time.Duration
- // Name holds the stored index name. On creation this field is ignored and the index name
- // is automatically computed by EnsureIndex based on the index key
+ // Name holds the stored index name. On creation if this field is unset it is
+ // computed by EnsureIndex based on the index key.
Name string
// Properties for spatial indexes.
- Bits, Min, Max int
- BucketSize float64
+ //
+ // Min and Max were improperly typed as int when they should have been
+ // floats. To preserve backwards compatibility they are still typed as
+ // int and the following two fields enable reading and writing the same
+ // fields as float numbers. In mgo.v3, these fields will be dropped and
+ // Min/Max will become floats.
+ Min, Max int
+ Minf, Maxf float64
+ BucketSize float64
+ Bits int
// Properties for text indexes.
DefaultLanguage string
@@ -1012,6 +1033,9 @@ type Index struct {
Weights map[string]int
}
+// mgo.v3: Drop Minf and Maxf and transform Min and Max to floats.
+// mgo.v3: Drop DropDups as it's unsupported past 2.8.
+
type indexKeyInfo struct {
name string
key bson.D
@@ -1193,8 +1217,8 @@ func (c *Collection) EnsureIndex(index Index) error {
Background: index.Background,
Sparse: index.Sparse,
Bits: index.Bits,
- Min: index.Min,
- Max: index.Max,
+ Min: index.Minf,
+ Max: index.Maxf,
BucketSize: index.BucketSize,
ExpireAfter: int(index.ExpireAfter / time.Second),
Weights: keyInfo.weights,
@@ -1202,6 +1226,15 @@ func (c *Collection) EnsureIndex(index Index) error {
LanguageOverride: index.LanguageOverride,
}
+ if spec.Min == 0 && spec.Max == 0 {
+ spec.Min = float64(index.Min)
+ spec.Max = float64(index.Max)
+ }
+
+ if index.Name != "" {
+ spec.Name = index.Name
+ }
+
NextField:
for name, weight := range index.Weights {
for i, elem := range spec.Weights {
@@ -1231,17 +1264,15 @@ NextField:
return err
}
-// DropIndex removes the index with key from the collection.
+// DropIndex drops the index with the provided key from the c collection.
//
-// The key value determines which fields compose the index. The index ordering
-// will be ascending by default. To obtain an index with a descending order,
-// the field name should be prefixed by a dash (e.g. []string{"-time"}).
+// See EnsureIndex for details on the accepted key variants.
//
// For example:
//
-// err := collection.DropIndex("lastname", "firstname")
+// err1 := collection.DropIndex("firstField", "-secondField")
+// err2 := collection.DropIndex("customIndexName")
//
-// See the EnsureIndex method for more details on indexes.
func (c *Collection) DropIndex(key ...string) error {
keyInfo, err := parseIndexKey(key)
if err != nil {
@@ -1271,6 +1302,58 @@ func (c *Collection) DropIndex(key ...string) error {
return nil
}
+// DropIndexName removes the index with the provided index name.
+//
+// For example:
+//
+// err := collection.DropIndexName("customIndexName")
+//
+func (c *Collection) DropIndexName(name string) error {
+ session := c.Database.Session
+
+ session = session.Clone()
+ defer session.Close()
+ session.SetMode(Strong, false)
+
+ c = c.With(session)
+
+ indexes, err := c.Indexes()
+ if err != nil {
+ return err
+ }
+
+ var index Index
+ for _, idx := range indexes {
+ if idx.Name == name {
+ index = idx
+ break
+ }
+ }
+
+ if index.Name != "" {
+ keyInfo, err := parseIndexKey(index.Key)
+ if err != nil {
+ return err
+ }
+
+ cacheKey := c.FullName + "\x00" + keyInfo.name
+ session.cluster().CacheIndex(cacheKey, false)
+ }
+
+ result := struct {
+ ErrMsg string
+ Ok bool
+ }{}
+ err = c.Database.Run(bson.D{{"dropIndexes", c.Name}, {"index", name}}, &result)
+ if err != nil {
+ return err
+ }
+ if !result.Ok {
+ return errors.New(result.ErrMsg)
+ }
+ return nil
+}
+
// Indexes returns a list of all indexes for the collection.
//
// For example, this snippet would drop all available indexes:
@@ -1340,15 +1423,36 @@ func (c *Collection) Indexes() (indexes []Index, err error) {
}
func indexFromSpec(spec indexSpec) Index {
- return Index{
- Name: spec.Name,
- Key: simpleIndexKey(spec.Key),
- Unique: spec.Unique,
- DropDups: spec.DropDups,
- Background: spec.Background,
- Sparse: spec.Sparse,
- ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second,
+ index := Index{
+ Name: spec.Name,
+ Key: simpleIndexKey(spec.Key),
+ Unique: spec.Unique,
+ DropDups: spec.DropDups,
+ Background: spec.Background,
+ Sparse: spec.Sparse,
+ Minf: spec.Min,
+ Maxf: spec.Max,
+ Bits: spec.Bits,
+ BucketSize: spec.BucketSize,
+ DefaultLanguage: spec.DefaultLanguage,
+ LanguageOverride: spec.LanguageOverride,
+ ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second,
+ }
+ if float64(int(spec.Min)) == spec.Min && float64(int(spec.Max)) == spec.Max {
+ index.Min = int(spec.Min)
+ index.Max = int(spec.Max)
+ }
+ if spec.TextIndexVersion > 0 {
+ index.Key = make([]string, len(spec.Weights))
+ index.Weights = make(map[string]int)
+ for i, elem := range spec.Weights {
+ index.Key[i] = "$text:" + elem.Name
+ if w, ok := elem.Value.(int); ok {
+ index.Weights[elem.Name] = w
+ }
+ }
}
+ return index
}
type indexSlice []Index
@@ -1575,6 +1679,24 @@ func (s *Session) SetPoolLimit(limit int) {
s.m.Unlock()
}
+// SetBypassValidation sets whether the server should bypass the registered
+// validation expressions executed when documents are inserted or modified,
+// in the interest of preserving properties for documents in the collection
+// being modified. The default is to not bypass, and thus to perform the
+// validation expressions registered for modified collections.
+//
+// Document validation was introduced in MongoDB 3.2.
+//
+// Relevant documentation:
+//
+// https://docs.mongodb.org/manual/release-notes/3.2/#bypass-validation
+//
+func (s *Session) SetBypassValidation(bypass bool) {
+ s.m.Lock()
+ s.bypassValidation = bypass
+ s.m.Unlock()
+}
+
// SetBatch sets the default batch size used when fetching documents from the
// database. It's possible to change this setting on a per-query basis as
// well, using the Query.Batch method.
@@ -1616,8 +1738,8 @@ type Safe struct {
W int // Min # of servers to ack before success
WMode string // Write mode for MongoDB 2.0+ (e.g. "majority")
WTimeout int // Milliseconds to wait for W before timing out
- FSync bool // Should servers sync to disk before returning success
- J bool // Wait for next group commit if journaling; no effect otherwise
+ FSync bool // Sync via the journal if present, or via data files sync otherwise
+ J bool // Sync via the journal if present
}
// Safe returns the current safety mode for the session.
@@ -1661,10 +1783,18 @@ func (s *Session) Safe() (safe *Safe) {
// the links below for more details (note that MongoDB internally reuses the
// "w" field name for WMode).
//
-// If safe.FSync is true and journaling is disabled, the servers will be
-// forced to sync all files to disk immediately before returning. If the
-// same option is true but journaling is enabled, the server will instead
-// await for the next group commit before returning.
+// If safe.J is true, servers will block until write operations have been
+// committed to the journal. Cannot be used in combination with FSync. Prior
+// to MongoDB 2.6 this option was ignored if the server was running without
+// journaling. Starting with MongoDB 2.6 write operations will fail with an
+// exception if this option is used when the server is running without
+// journaling.
+//
+// If safe.FSync is true and the server is running without journaling, blocks
+// until the server has synced all data files to disk. If the server is running
+// with journaling, this acts the same as the J option, blocking until write
+// operations have been committed to the journal. Cannot be used in
+// combination with J.
//
// Since MongoDB 2.0.0, the safe.J option can also be used instead of FSync
// to force the server to wait for a group commit in case journaling is
@@ -1811,7 +1941,7 @@ func (s *Session) Run(cmd interface{}, result interface{}) error {
// used for reading operations to those with both tag "disk" set to
// "ssd" and tag "rack" set to 1:
//
-// session.SelectSlaves(bson.D{{"disk", "ssd"}, {"rack", 1}})
+// session.SelectServers(bson.D{{"disk", "ssd"}, {"rack", 1}})
//
// Multiple sets of tags may be provided, in which case the used server
// must match all tags within any one set.
@@ -2172,6 +2302,8 @@ func (p *Pipe) Batch(n int) *Pipe {
return p
}
+// mgo.v3: Use a single user-visible error type.
+
type LastError struct {
Err string
Code, N, Waited int
@@ -2179,6 +2311,9 @@ type LastError struct {
WTimeout bool
UpdatedExisting bool `bson:"updatedExisting"`
UpsertedId interface{} `bson:"upserted"`
+
+ modified int
+ errors []error
}
func (err *LastError) Error() string {
@@ -2215,6 +2350,13 @@ func IsDup(err error) bool {
return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 || e.Code == 16460 && strings.Contains(e.Err, " E11000 ")
case *QueryError:
return e.Code == 11000 || e.Code == 11001 || e.Code == 12582
+ case *bulkError:
+ for _, ee := range e.errs {
+ if !IsDup(ee) {
+ return false
+ }
+ }
+ return true
}
return false
}
@@ -2224,7 +2366,7 @@ func IsDup(err error) bool {
// happens while inserting the provided documents, the returned error will
// be of type *LastError.
func (c *Collection) Insert(docs ...interface{}) error {
- _, err := c.writeQuery(&insertOp{c.FullName, docs, 0})
+ _, err := c.writeOp(&insertOp{c.FullName, docs, 0}, true)
return err
}
@@ -2240,7 +2382,15 @@ func (c *Collection) Insert(docs ...interface{}) error {
// http://www.mongodb.org/display/DOCS/Atomic+Operations
//
func (c *Collection) Update(selector interface{}, update interface{}) error {
- lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 0})
+ if selector == nil {
+ selector = bson.D{}
+ }
+ op := updateOp{
+ Collection: c.FullName,
+ Selector: selector,
+ Update: update,
+ }
+ lerr, err := c.writeOp(&op, true)
if err == nil && lerr != nil && !lerr.UpdatedExisting {
return ErrNotFound
}
@@ -2276,7 +2426,17 @@ type ChangeInfo struct {
// http://www.mongodb.org/display/DOCS/Atomic+Operations
//
func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info *ChangeInfo, err error) {
- lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 2})
+ if selector == nil {
+ selector = bson.D{}
+ }
+ op := updateOp{
+ Collection: c.FullName,
+ Selector: selector,
+ Update: update,
+ Flags: 2,
+ Multi: true,
+ }
+ lerr, err := c.writeOp(&op, true)
if err == nil && lerr != nil {
info = &ChangeInfo{Updated: lerr.N}
}
@@ -2297,7 +2457,17 @@ func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info *
// http://www.mongodb.org/display/DOCS/Atomic+Operations
//
func (c *Collection) Upsert(selector interface{}, update interface{}) (info *ChangeInfo, err error) {
- lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 1})
+ if selector == nil {
+ selector = bson.D{}
+ }
+ op := updateOp{
+ Collection: c.FullName,
+ Selector: selector,
+ Update: update,
+ Flags: 1,
+ Upsert: true,
+ }
+ lerr, err := c.writeOp(&op, true)
if err == nil && lerr != nil {
info = &ChangeInfo{}
if lerr.UpdatedExisting {
@@ -2329,7 +2499,10 @@ func (c *Collection) UpsertId(id interface{}, update interface{}) (info *ChangeI
// http://www.mongodb.org/display/DOCS/Removing
//
func (c *Collection) Remove(selector interface{}) error {
- lerr, err := c.writeQuery(&deleteOp{c.FullName, selector, 1})
+ if selector == nil {
+ selector = bson.D{}
+ }
+ lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 1, 1}, true)
if err == nil && lerr != nil && lerr.N == 0 {
return ErrNotFound
}
@@ -2355,7 +2528,10 @@ func (c *Collection) RemoveId(id interface{}) error {
// http://www.mongodb.org/display/DOCS/Removing
//
func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error) {
- lerr, err := c.writeQuery(&deleteOp{c.FullName, selector, 0})
+ if selector == nil {
+ selector = bson.D{}
+ }
+ lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 0, 0}, true)
if err == nil && lerr != nil {
info = &ChangeInfo{Removed: lerr.N}
}
@@ -3871,7 +4047,7 @@ type BuildInfo struct {
VersionArray []int `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise
GitVersion string `bson:"gitVersion"`
OpenSSLVersion string `bson:"OpenSSLVersion"`
- SysInfo string `bson:"sysInfo"`
+ SysInfo string `bson:"sysInfo"` // Deprecated and empty on MongoDB 3.2+.
Bits int
Debug bool
MaxObjectSize int `bson:"maxBsonObjectSize"`
@@ -3913,6 +4089,9 @@ func (s *Session) BuildInfo() (info BuildInfo, err error) {
// That information may be moved to another field if people need it.
info.GitVersion = info.GitVersion[:i]
}
+ if info.SysInfo == "deprecated" {
+ info.SysInfo = ""
+ }
return
}
@@ -4057,27 +4236,36 @@ type writeCmdResult struct {
Index int
Id interface{} `_id`
}
- Errors []struct {
- Ok bool
- Index int
- Code int
- N int
- ErrMsg string
- } `bson:"writeErrors"`
- ConcernError struct {
- Code int
- ErrMsg string
- } `bson:"writeConcernError"`
+ ConcernError writeConcernError `bson:"writeConcernError"`
+ Errors []writeCmdError `bson:"writeErrors"`
+}
+
+type writeConcernError struct {
+ Code int
+ ErrMsg string
+}
+
+type writeCmdError struct {
+ Index int
+ Code int
+ ErrMsg string
+}
+
+func (r *writeCmdResult) QueryErrors() []error {
+ var errs []error
+ for _, err := range r.Errors {
+ errs = append(errs, &QueryError{Code: err.Code, Message: err.ErrMsg})
+ }
+ return errs
}
-// writeQuery runs the given modifying operation, potentially followed up
+// writeOp runs the given modifying operation, potentially followed up
// by a getLastError command in case the session is in safe mode. The
// LastError result is made available in lerr, and if lerr.Err is set it
// will also be returned as err.
-func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) {
+func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err error) {
s := c.Database.Session
- dbname := c.Database.Name
- socket, err := s.acquireSocket(dbname == "local")
+ socket, err := s.acquireSocket(c.Database.Name == "local")
if err != nil {
return nil, err
}
@@ -4085,13 +4273,13 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) {
s.m.RLock()
safeOp := s.safeOp
+ bypassValidation := s.bypassValidation
s.m.RUnlock()
- // TODO Enable this path for wire version 2 as well.
- if socket.ServerInfo().MaxWireVersion >= 3 {
+ if socket.ServerInfo().MaxWireVersion >= 2 {
// Servers with a more recent write protocol benefit from write commands.
if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 {
- var firstErr error
+ var errors []error
// Maximum batch size is 1000. Must split out in separate operations for compatibility.
all := op.documents
for i := 0; i < len(all); i += 1000 {
@@ -4100,22 +4288,59 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) {
l = len(all)
}
op.documents = all[i:l]
- _, err := c.writeCommand(socket, safeOp, op)
+ lerr, err := c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation)
if err != nil {
- if op.flags&1 != 0 {
- if firstErr == nil {
- firstErr = err
- }
- } else {
- return nil, err
+ errors = append(errors, lerr.errors...)
+ if op.flags&1 == 0 {
+ return &LastError{errors: errors}, err
}
}
}
- return nil, firstErr
+ if len(errors) == 0 {
+ return nil, nil
+ }
+ return &LastError{errors: errors}, errors[0]
+ }
+ return c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation)
+ } else if updateOps, ok := op.(bulkUpdateOp); ok {
+ var lerr LastError
+ for _, updateOp := range updateOps {
+ oplerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered)
+ if err != nil {
+ lerr.N += oplerr.N
+ lerr.modified += oplerr.modified
+ lerr.errors = append(lerr.errors, oplerr.errors...)
+ if ordered {
+ break
+ }
+ }
}
- return c.writeCommand(socket, safeOp, op)
+ if len(lerr.errors) == 0 {
+ return nil, nil
+ }
+ return &lerr, lerr.errors[0]
+ } else if deleteOps, ok := op.(bulkDeleteOp); ok {
+ var lerr LastError
+ for _, deleteOp := range deleteOps {
+ oplerr, err := c.writeOpQuery(socket, safeOp, deleteOp, ordered)
+ if err != nil {
+ lerr.N += oplerr.N
+ lerr.modified += oplerr.modified
+ lerr.errors = append(lerr.errors, oplerr.errors...)
+ if ordered {
+ break
+ }
+ }
+ }
+ if len(lerr.errors) == 0 {
+ return nil, nil
+ }
+ return &lerr, lerr.errors[0]
}
+ return c.writeOpQuery(socket, safeOp, op, ordered)
+}
+func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) {
if safeOp == nil {
return nil, socket.Query(op)
}
@@ -4125,7 +4350,7 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) {
var replyErr error
mutex.Lock()
query := *safeOp // Copy the data.
- query.collection = dbname + ".$cmd"
+ query.collection = c.Database.Name + ".$cmd"
query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
replyData = docData
replyErr = err
@@ -4155,7 +4380,7 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) {
return result, nil
}
-func (c *Collection) writeCommand(socket *mongoSocket, safeOp *queryOp, op interface{}) (lerr *LastError, err error) {
+func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered, bypassValidation bool) (lerr *LastError, err error) {
var writeConcern interface{}
if safeOp == nil {
writeConcern = bson.D{{"w", 0}}
@@ -4175,29 +4400,40 @@ func (c *Collection) writeCommand(socket *mongoSocket, safeOp *queryOp, op inter
}
case *updateOp:
// http://docs.mongodb.org/manual/reference/command/update
- selector := op.selector
- if selector == nil {
- selector = bson.D{}
+ cmd = bson.D{
+ {"update", c.Name},
+ {"updates", []interface{}{op}},
+ {"writeConcern", writeConcern},
+ {"ordered", ordered},
}
+ case bulkUpdateOp:
+ // http://docs.mongodb.org/manual/reference/command/update
cmd = bson.D{
{"update", c.Name},
- {"updates", []bson.D{{{"q", selector}, {"u", op.update}, {"upsert", op.flags&1 != 0}, {"multi", op.flags&2 != 0}}}},
+ {"updates", op},
{"writeConcern", writeConcern},
- //{"ordered", <bool>},
+ {"ordered", ordered},
}
case *deleteOp:
// http://docs.mongodb.org/manual/reference/command/delete
- selector := op.selector
- if selector == nil {
- selector = bson.D{}
+ cmd = bson.D{
+ {"delete", c.Name},
+ {"deletes", []interface{}{op}},
+ {"writeConcern", writeConcern},
+ {"ordered", ordered},
}
+ case bulkDeleteOp:
+ // http://docs.mongodb.org/manual/reference/command/delete
cmd = bson.D{
{"delete", c.Name},
- {"deletes", []bson.D{{{"q", selector}, {"limit", op.flags & 1}}}},
+ {"deletes", op},
{"writeConcern", writeConcern},
- //{"ordered", <bool>},
+ {"ordered", ordered},
}
}
+ if bypassValidation {
+ cmd = append(cmd, bson.DocElem{"bypassDocumentValidation", true})
+ }
var result writeCmdResult
err = c.Database.run(socket, cmd, &result)
@@ -4205,17 +4441,18 @@ func (c *Collection) writeCommand(socket *mongoSocket, safeOp *queryOp, op inter
lerr = &LastError{
UpdatedExisting: result.N > 0 && len(result.Upserted) == 0,
N: result.N,
+
+ modified: result.NModified,
+ errors: result.QueryErrors(),
}
if len(result.Upserted) > 0 {
lerr.UpsertedId = result.Upserted[0].Id
}
if len(result.Errors) > 0 {
e := result.Errors[0]
- if !e.Ok {
- lerr.Code = e.Code
- lerr.Err = e.ErrMsg
- err = lerr
- }
+ lerr.Code = e.Code
+ lerr.Err = e.ErrMsg
+ err = lerr
} else if result.ConcernError.Code != 0 {
e := result.ConcernError
lerr.Code = e.Code
diff --git a/vendor/src/gopkg.in/mgo.v2/session_test.go b/vendor/src/gopkg.in/mgo.v2/session_test.go
index 8e2d7c9487f..eb3929830e9 100644
--- a/vendor/src/gopkg.in/mgo.v2/session_test.go
+++ b/vendor/src/gopkg.in/mgo.v2/session_test.go
@@ -30,7 +30,6 @@ import (
"flag"
"fmt"
"math"
- "reflect"
"runtime"
"sort"
"strconv"
@@ -287,10 +286,7 @@ func (s *S) TestDatabaseAndCollectionNames(c *C) {
names, err := session.DatabaseNames()
c.Assert(err, IsNil)
- if !reflect.DeepEqual(names, []string{"db1", "db2"}) {
- // 2.4+ has "local" as well.
- c.Assert(names, DeepEquals, []string{"db1", "db2", "local"})
- }
+ c.Assert(filterDBs(names), DeepEquals, []string{"db1", "db2"})
// Try to exercise cursor logic. 2.8.0-rc3 still ignores this.
session.SetBatch(2)
@@ -698,20 +694,30 @@ func (s *S) TestDropDatabase(c *C) {
names, err := session.DatabaseNames()
c.Assert(err, IsNil)
- if !reflect.DeepEqual(names, []string{"db2"}) {
- // 2.4+ has "local" as well.
- c.Assert(names, DeepEquals, []string{"db2", "local"})
- }
+ c.Assert(filterDBs(names), DeepEquals, []string{"db2"})
err = db2.DropDatabase()
c.Assert(err, IsNil)
names, err = session.DatabaseNames()
c.Assert(err, IsNil)
- if !reflect.DeepEqual(names, []string(nil)) {
- // 2.4+ has "local" as well.
- c.Assert(names, DeepEquals, []string{"local"})
+ c.Assert(filterDBs(names), DeepEquals, []string{})
+}
+
+func filterDBs(dbs []string) []string {
+ var i int
+ for _, name := range dbs {
+ switch name {
+ case "admin", "local":
+ default:
+ dbs[i] = name
+ i++
+ }
+ }
+ if len(dbs) == 0 {
+ return []string{}
}
+ return dbs[:i]
}
func (s *S) TestDropCollection(c *C) {
@@ -1196,7 +1202,13 @@ func (s *S) TestQueryComment(c *C) {
err = query.One(nil)
c.Assert(err, IsNil)
- n, err := session.DB("mydb").C("system.profile").Find(bson.M{"query.$query.n": 41, "query.$comment": "some comment"}).Count()
+ commentField := "query.$comment"
+ nField := "query.$query.n"
+ if s.versionAtLeast(3, 2) {
+ commentField = "query.comment"
+ nField = "query.filter.n"
+ }
+ n, err := session.DB("mydb").C("system.profile").Find(bson.M{nField: 41, commentField: "some comment"}).Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, 1)
}
@@ -1652,7 +1664,7 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) {
mgo.ResetStats()
- timeout := 3 * time.Second
+ timeout := 5 * time.Second
query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2)
iter := query.Tail(timeout)
@@ -1674,39 +1686,35 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) {
mgo.ResetStats()
// The following call to Next will block.
+ done := make(chan bool)
+ defer func() { <-done }()
go func() {
// The internal AwaitData timing of MongoDB is around 2 seconds,
// so this should force mgo to sleep at least once by itself to
// respect the requested timeout.
- time.Sleep(timeout + 5e8*time.Nanosecond)
+ c.Logf("[GOROUTINE] Starting and sleeping...")
+ time.Sleep(timeout - 2*time.Second)
+ c.Logf("[GOROUTINE] Woke up...")
session := session.New()
- defer session.Close()
- coll := session.DB("mydb").C("mycoll")
- coll.Insert(M{"n": 47})
+ c.Logf("[GOROUTINE] Session created and will insert...")
+ err := coll.Insert(M{"n": 47})
+ c.Logf("[GOROUTINE] Insert attempted, err=%v...", err)
+ session.Close()
+ c.Logf("[GOROUTINE] Session closed.")
+ c.Check(err, IsNil)
+ done <- true
}()
c.Log("Will wait for Next with N=47...")
ok := iter.Next(&result)
+ c.Log("Next unblocked...")
c.Assert(ok, Equals, true)
+
c.Assert(iter.Err(), IsNil)
c.Assert(iter.Timeout(), Equals, false)
c.Assert(result.N, Equals, 47)
c.Log("Got Next with N=47!")
- // The following may break because it depends a bit on the internal
- // timing used by MongoDB's AwaitData logic. If it does, the problem
- // will be observed as more GET_MORE_OPs than predicted:
- // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep +
- // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47
- stats := mgo.GetStats()
- if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it.
- c.Assert(stats.SentOps, Equals, 4)
- } else {
- c.Assert(stats.SentOps, Equals, 5)
- }
- c.Assert(stats.ReceivedOps, Equals, 4) // REPLY_OPs for 1*QUERY_OP for nonce + 2*GET_MORE_OPs + 1*QUERY_OP
- c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response
-
c.Log("Will wait for a result which will never come...")
started := time.Now()
@@ -1796,7 +1804,7 @@ func (s *S) TestFindTailTimeoutNoSleep(c *C) {
// 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next +
// 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47
stats := mgo.GetStats()
- if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it.
+ if s.versionAtLeast(2, 6) {
c.Assert(stats.SentOps, Equals, 3)
} else {
c.Assert(stats.SentOps, Equals, 4)
@@ -1886,20 +1894,6 @@ func (s *S) TestFindTailNoTimeout(c *C) {
c.Assert(result.N, Equals, 47)
c.Log("Got Next with N=47!")
- // The following may break because it depends a bit on the internal
- // timing used by MongoDB's AwaitData logic. If it does, the problem
- // will be observed as more GET_MORE_OPs than predicted:
- // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next +
- // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47
- stats := mgo.GetStats()
- if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it.
- c.Assert(stats.SentOps, Equals, 3)
- } else {
- c.Assert(stats.SentOps, Equals, 4)
- }
- c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP
- c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response
-
c.Log("Will wait for a result which will never come...")
gotNext := make(chan bool)
@@ -2179,6 +2173,10 @@ func (s *S) TestFindForResetsResult(c *C) {
}
func (s *S) TestFindIterSnapshot(c *C) {
+ if s.versionAtLeast(3, 2) {
+ c.Skip("Broken in 3.2: https://jira.mongodb.org/browse/SERVER-21403")
+ }
+
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
@@ -2283,6 +2281,10 @@ func (s *S) TestSortScoreText(c *C) {
err = coll.EnsureIndex(mgo.Index{
Key: []string{"$text:a", "$text:b"},
})
+ msg := "text search not enabled"
+ if err != nil && strings.Contains(err.Error(), msg) {
+ c.Skip(msg)
+ }
c.Assert(err, IsNil)
err = coll.Insert(M{
@@ -2517,8 +2519,7 @@ func (s *S) TestSafeInsert(c *C) {
// It must have sent two operations (INSERT_OP + getLastError QUERY_OP)
stats := mgo.GetStats()
- // TODO Will be 2.6 when write commands are enabled for it.
- if s.versionAtLeast(3, 0) {
+ if s.versionAtLeast(2, 6) {
c.Assert(stats.SentOps, Equals, 1)
} else {
c.Assert(stats.SentOps, Equals, 2)
@@ -2647,8 +2648,8 @@ var indexTests = []struct {
"name": "loc_old_2d",
"key": M{"loc_old": "2d"},
"ns": "mydb.mycoll",
- "min": -500,
- "max": 500,
+ "min": -500.0,
+ "max": 500.0,
"bits": 32,
},
}, {
@@ -2662,8 +2663,25 @@ var indexTests = []struct {
"name": "loc_2d",
"key": M{"loc": "2d"},
"ns": "mydb.mycoll",
- "min": -500,
- "max": 500,
+ "min": -500.0,
+ "max": 500.0,
+ "bits": 32,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"$2d:loc"},
+ Minf: -500.1,
+ Maxf: 500.1,
+ Min: 1, // Should be ignored
+ Max: 2,
+ Bits: 32,
+ },
+ M{
+ "name": "loc_2d",
+ "key": M{"loc": "2d"},
+ "ns": "mydb.mycoll",
+ "min": -500.1,
+ "max": 500.1,
"bits": 32,
},
}, {
@@ -2719,6 +2737,16 @@ var indexTests = []struct {
"language_override": "language",
"textIndexVersion": 2,
},
+}, {
+ mgo.Index{
+ Key: []string{"cn"},
+ Name: "CustomName",
+ },
+ M{
+ "name": "CustomName",
+ "key": M{"cn": 1},
+ "ns": "mydb.mycoll",
+ },
}}
func (s *S) TestEnsureIndex(c *C) {
@@ -2730,16 +2758,20 @@ func (s *S) TestEnsureIndex(c *C) {
idxs := session.DB("mydb").C("system.indexes")
for _, test := range indexTests {
- if !s.versionAtLeast(2, 4) && test.expected["weights"] != nil {
- // No text indexes until 2.4.
+ err = coll.EnsureIndex(test.index)
+ msg := "text search not enabled"
+ if err != nil && strings.Contains(err.Error(), msg) {
continue
}
-
- err = coll.EnsureIndex(test.index)
c.Assert(err, IsNil)
+ expectedName := test.index.Name
+ if expectedName == "" {
+ expectedName, _ = test.expected["name"].(string)
+ }
+
obtained := M{}
- err = idxs.Find(M{"name": test.expected["name"]}).One(obtained)
+ err = idxs.Find(M{"name": expectedName}).One(obtained)
c.Assert(err, IsNil)
delete(obtained, "v")
@@ -2747,12 +2779,60 @@ func (s *S) TestEnsureIndex(c *C) {
if s.versionAtLeast(2, 7) {
// Was deprecated in 2.6, and not being reported by 2.7+.
delete(test.expected, "dropDups")
+ test.index.DropDups = false
+ }
+ if s.versionAtLeast(3, 2) && test.expected["textIndexVersion"] != nil {
+ test.expected["textIndexVersion"] = 3
}
c.Assert(obtained, DeepEquals, test.expected)
- err = coll.DropIndex(test.index.Key...)
+ // The result of Indexes must match closely what was used to create the index.
+ indexes, err := coll.Indexes()
c.Assert(err, IsNil)
+ c.Assert(indexes, HasLen, 2)
+ gotIndex := indexes[0]
+ if gotIndex.Name == "_id_" {
+ gotIndex = indexes[1]
+ }
+ wantIndex := test.index
+ if wantIndex.Name == "" {
+ wantIndex.Name = gotIndex.Name
+ }
+ if strings.HasPrefix(wantIndex.Key[0], "@") {
+ wantIndex.Key[0] = "$2d:" + wantIndex.Key[0][1:]
+ }
+ if wantIndex.Minf == 0 && wantIndex.Maxf == 0 {
+ wantIndex.Minf = float64(wantIndex.Min)
+ wantIndex.Maxf = float64(wantIndex.Max)
+ } else {
+ wantIndex.Min = gotIndex.Min
+ wantIndex.Max = gotIndex.Max
+ }
+ if wantIndex.DefaultLanguage == "" {
+ wantIndex.DefaultLanguage = gotIndex.DefaultLanguage
+ }
+ if wantIndex.LanguageOverride == "" {
+ wantIndex.LanguageOverride = gotIndex.LanguageOverride
+ }
+ for name, _ := range gotIndex.Weights {
+ if _, ok := wantIndex.Weights[name]; !ok {
+ if wantIndex.Weights == nil {
+ wantIndex.Weights = make(map[string]int)
+ }
+ wantIndex.Weights[name] = 1
+ }
+ }
+ c.Assert(gotIndex, DeepEquals, wantIndex)
+
+ // Drop created index by key or by name if a custom name was used.
+ if test.index.Name == "" {
+ err = coll.DropIndex(test.index.Key...)
+ c.Assert(err, IsNil)
+ } else {
+ err = coll.DropIndexName(test.index.Name)
+ c.Assert(err, IsNil)
+ }
}
}
@@ -2852,24 +2932,57 @@ func (s *S) TestEnsureIndexDropIndex(c *C) {
c.Assert(err, IsNil)
sysidx := session.DB("mydb").C("system.indexes")
- dummy := &struct{}{}
- err = sysidx.Find(M{"name": "a_1"}).One(dummy)
+ err = sysidx.Find(M{"name": "a_1"}).One(nil)
c.Assert(err, IsNil)
- err = sysidx.Find(M{"name": "b_1"}).One(dummy)
+ err = sysidx.Find(M{"name": "b_1"}).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
err = coll.DropIndex("a")
c.Assert(err, IsNil)
- err = sysidx.Find(M{"name": "a_1"}).One(dummy)
+ err = sysidx.Find(M{"name": "a_1"}).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
err = coll.DropIndex("a")
c.Assert(err, ErrorMatches, "index not found.*")
}
+func (s *S) TestEnsureIndexDropIndexName(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"b"}, Name: "a"})
+ c.Assert(err, IsNil)
+
+ err = coll.DropIndexName("a")
+ c.Assert(err, IsNil)
+
+ sysidx := session.DB("mydb").C("system.indexes")
+
+ err = sysidx.Find(M{"name": "a_1"}).One(nil)
+ c.Assert(err, IsNil)
+
+ err = sysidx.Find(M{"name": "a"}).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.DropIndexName("a_1")
+ c.Assert(err, IsNil)
+
+ err = sysidx.Find(M{"name": "a_1"}).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.DropIndexName("a_1")
+ c.Assert(err, ErrorMatches, "index not found.*")
+}
+
func (s *S) TestEnsureIndexCaching(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
@@ -2948,6 +3061,50 @@ func (s *S) TestEnsureIndexGetIndexes(c *C) {
c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"})
}
+func (s *S) TestEnsureIndexNameCaching(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"})
+ c.Assert(err, IsNil)
+
+ mgo.ResetStats()
+
+ // Second EnsureIndex should be cached and do nothing.
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"})
+ c.Assert(err, IsNil)
+
+ stats := mgo.GetStats()
+ c.Assert(stats.SentOps, Equals, 0)
+
+ // Resetting the cache should make it contact the server again.
+ session.ResetIndexCache()
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"})
+ c.Assert(err, IsNil)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SentOps > 0, Equals, true)
+
+ // Dropping the index should also drop the cached index key.
+ err = coll.DropIndexName("custom")
+ c.Assert(err, IsNil)
+
+ mgo.ResetStats()
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"})
+ c.Assert(err, IsNil)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SentOps > 0, Equals, true)
+}
+
func (s *S) TestEnsureIndexEvalGetIndexes(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
@@ -3312,7 +3469,13 @@ func (s *S) TestBuildInfo(c *C) {
c.Assert(info.VersionArray, DeepEquals, v)
c.Assert(info.GitVersion, Matches, "[a-z0-9]+")
- c.Assert(info.SysInfo, Matches, ".*[0-9:]+.*")
+
+ if s.versionAtLeast(3, 2) {
+ // It was deprecated in 3.2.
+ c.Assert(info.SysInfo, Equals, "")
+ } else {
+ c.Assert(info.SysInfo, Matches, ".*[0-9:]+.*")
+ }
if info.Bits != 32 && info.Bits != 64 {
c.Fatalf("info.Bits is %d", info.Bits)
}
@@ -3678,6 +3841,53 @@ func (s *S) TestNewIterNoServerPresetErr(c *C) {
c.Assert(iter.Err(), ErrorMatches, "my error")
}
+func (s *S) TestBypassValidation(c *C) {
+ if !s.versionAtLeast(3, 2) {
+ c.Skip("validation supported on 3.2+")
+ }
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ err = coll.Database.Run(bson.D{
+ {"collMod", "mycoll"},
+ {"validator", M{"s": M{"$type": "string"}}},
+ }, nil)
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"n": 2})
+ c.Assert(err, ErrorMatches, "Document failed validation")
+
+ err = coll.Update(M{"n": 1}, M{"n": 10})
+ c.Assert(err, ErrorMatches, "Document failed validation")
+
+ session.SetBypassValidation(true)
+
+ err = coll.Insert(M{"n": 3})
+ c.Assert(err, IsNil)
+
+ err = coll.Update(M{"n": 3}, M{"n": 4})
+ c.Assert(err, IsNil)
+
+ // Ensure this still works. Shouldn't be affected.
+ err = coll.Remove(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ var result struct{ N int }
+ var ns []int
+ iter := coll.Find(nil).Iter()
+ for iter.Next(&result) {
+ ns = append(ns, result.N)
+ }
+ c.Assert(iter.Err(), IsNil)
+ sort.Ints(ns)
+ c.Assert(ns, DeepEquals, []int{4})
+}
+
// --------------------------------------------------------------------------
// Some benchmarks that require a running database.
diff --git a/vendor/src/gopkg.in/mgo.v2/socket.go b/vendor/src/gopkg.in/mgo.v2/socket.go
index 43563926e9b..28b30b2d882 100644
--- a/vendor/src/gopkg.in/mgo.v2/socket.go
+++ b/vendor/src/gopkg.in/mgo.v2/socket.go
@@ -94,27 +94,23 @@ type queryWrapper struct {
}
func (op *queryOp) finalQuery(socket *mongoSocket) interface{} {
- if socket.ServerInfo().Mongos {
+ if op.flags&flagSlaveOk != 0 && socket.ServerInfo().Mongos {
var modeName string
- if op.flags&flagSlaveOk == 0 {
+ switch op.mode {
+ case Strong:
modeName = "primary"
- } else {
- switch op.mode {
- case Strong:
- modeName = "primary"
- case Monotonic, Eventual:
- modeName = "secondaryPreferred"
- case PrimaryPreferred:
- modeName = "primaryPreferred"
- case Secondary:
- modeName = "secondary"
- case SecondaryPreferred:
- modeName = "secondaryPreferred"
- case Nearest:
- modeName = "nearest"
- default:
- panic(fmt.Sprintf("unsupported read mode: %d", op.mode))
- }
+ case Monotonic, Eventual:
+ modeName = "secondaryPreferred"
+ case PrimaryPreferred:
+ modeName = "primaryPreferred"
+ case Secondary:
+ modeName = "secondary"
+ case SecondaryPreferred:
+ modeName = "secondaryPreferred"
+ case Nearest:
+ modeName = "nearest"
+ default:
+ panic(fmt.Sprintf("unsupported read mode: %d", op.mode))
}
op.hasOptions = true
op.options.ReadPreference = make(bson.D, 0, 2)
@@ -157,16 +153,19 @@ type insertOp struct {
}
type updateOp struct {
- collection string // "database.collection"
- selector interface{}
- update interface{}
- flags uint32
+ Collection string `bson:"-"` // "database.collection"
+ Selector interface{} `bson:"q"`
+ Update interface{} `bson:"u"`
+ Flags uint32 `bson:"-"`
+ Multi bool `bson:"multi,omitempty"`
+ Upsert bool `bson:"upsert,omitempty"`
}
type deleteOp struct {
- collection string // "database.collection"
- selector interface{}
- flags uint32
+ Collection string `bson:"-"` // "database.collection"
+ Selector interface{} `bson:"q"`
+ Flags uint32 `bson:"-"`
+ Limit int `bson:"limit"`
}
type killCursorsOp struct {
@@ -397,15 +396,15 @@ func (socket *mongoSocket) Query(ops ...interface{}) (err error) {
case *updateOp:
buf = addHeader(buf, 2001)
buf = addInt32(buf, 0) // Reserved
- buf = addCString(buf, op.collection)
- buf = addInt32(buf, int32(op.flags))
- debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector)
- buf, err = addBSON(buf, op.selector)
+ buf = addCString(buf, op.Collection)
+ buf = addInt32(buf, int32(op.Flags))
+ debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector)
+ buf, err = addBSON(buf, op.Selector)
if err != nil {
return err
}
- debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.update)
- buf, err = addBSON(buf, op.update)
+ debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.Update)
+ buf, err = addBSON(buf, op.Update)
if err != nil {
return err
}
@@ -451,10 +450,10 @@ func (socket *mongoSocket) Query(ops ...interface{}) (err error) {
case *deleteOp:
buf = addHeader(buf, 2006)
buf = addInt32(buf, 0) // Reserved
- buf = addCString(buf, op.collection)
- buf = addInt32(buf, int32(op.flags))
- debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector)
- buf, err = addBSON(buf, op.selector)
+ buf = addCString(buf, op.Collection)
+ buf = addInt32(buf, int32(op.Flags))
+ debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector)
+ buf, err = addBSON(buf, op.Selector)
if err != nil {
return err
}
diff --git a/vendor/src/gopkg.in/mgo.v2/suite_test.go b/vendor/src/gopkg.in/mgo.v2/suite_test.go
index 140e5a09a90..648ad9bb4d3 100644
--- a/vendor/src/gopkg.in/mgo.v2/suite_test.go
+++ b/vendor/src/gopkg.in/mgo.v2/suite_test.go
@@ -103,6 +103,9 @@ func (s *S) SetUpTest(c *C) {
func (s *S) TearDownTest(c *C) {
if s.stopped {
+ s.Stop(":40201")
+ s.Stop(":40202")
+ s.Stop(":40203")
s.StartAll()
}
for _, host := range s.frozen {
@@ -180,13 +183,15 @@ func (s *S) Thaw(host string) {
}
func (s *S) StartAll() {
- // Restart any stopped nodes.
- run("cd _testdb && supervisorctl start all")
- err := run("cd testdb && mongo --nodb wait.js")
- if err != nil {
- panic(err)
+ if s.stopped {
+ // Restart any stopped nodes.
+ run("cd _testdb && supervisorctl start all")
+ err := run("cd testdb && mongo --nodb wait.js")
+ if err != nil {
+ panic(err)
+ }
+ s.stopped = false
}
- s.stopped = false
}
func run(command string) error {
diff --git a/vendor/src/gopkg.in/mgo.v2/testdb/client.pem b/vendor/src/gopkg.in/mgo.v2/testdb/client.pem
index cc57eec7aae..93aed3556e9 100644
--- a/vendor/src/gopkg.in/mgo.v2/testdb/client.pem
+++ b/vendor/src/gopkg.in/mgo.v2/testdb/client.pem
@@ -1,44 +1,57 @@
+To regenerate the key:
+
+ openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key
+ cat server.key server.crt > server.pem
+ openssl genrsa -out client.key 2048
+ openssl req -key client.key -new -out client.req
+ openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt
+ cat client.key client.crt > client.pem
+
-----BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAwE2sl8YeTTSetwo9kykJ5mCZ/FtfPtn/0X4nOlTM2Qc/uWzA
-sjSYoSV4UkuOiWjKQQH2EDeXaltshOo7F0oCY5ozVeQe+phe987iKTvLtf7NoXJD
-KqNqR4Kb4ylbCrEky7+Xvw6yrrqw8qgWy+9VsrilR3q8LsETE9SBMtfp3BUaaNQp
-peNm+iAhx3uZSv3mdzSLFSA/o61kAyG0scLExYDjo/7xyMNQoloLvNmx4Io160+y
-lOz077/qqU620tmuDLRz1QdxK/bptmXTnsBCRxl+U8nzbwVZgWFENhXplbcN+SjN
-LhdnvTiU2qFhgZmc7ZtCKdPIpx3W6pH9bx7kTwIDAQABAoIBAQCOQygyo8NY9FuS
-J8ZDrvF+9+oS8fm1QorpDT2x/ngI+j7fSyAG9bgQRusLXpAVAWvWyb+iYa3nZbkT
-X0DVys+XpcTifr+YPc7L3sYbIPxkKBsxm5kq2vfN7Uart7V9ZG1HOfblxdbUQpKT
-AVzUA7vPWqATEC5VHEqjuerWlTqRr9YLZE/nkE7ICLISqdl4WDYfUYJwoXWfYkXQ
-Lfl5Qh2leyri9S3urvDrhnURTQ1lM182IbTRA+9rUiFzsRW+9U4HPY7Ao2Itp8dr
-GRP4rcq4TP+NcF0Ky64cNfKXCWmwqTBRFYAlTD6gwjN/s2BzvWD/2nlnc0DYAXrB
-TgFCPk7xAoGBAOwuHICwwTxtzrdWjuRGU3RxL4eLEXedtL8yon/yPci3e+8eploX
-1Fp0rEK2gIGDp/X8DiOtrKXih8XPusCwE/I3EvjHdI0RylLZXTPOp1Ei21dXRsiV
-YxcF+d5s11q5tJtF+5ISUeIz2iSc9Z2LBnb8JDK1jcCRa5Q212q3ZWW5AoGBANBw
-9CoMbxINLG1q0NvOOSwMKDk2OB+9JbQ5lwF4ijZl2I6qRoOCzQ3lBs0Qv/AeBjNR
-SerDs2+eWnIBUbgSdiqcOKnXAI/Qbl1IkVFYV/2g9m6dgu1fNWNBv8NIYDHCLfDx
-W3fpO5JMf+iE5XC4XqCfSBIME2yxPSGQjal6tB5HAoGAddYDzolhv/6hVoPPQ0F7
-PeuC5UOTcXSzy3k97kw0W0KAiStnoCengYIYuChKMVQ4ptgdTdvG+fTt/NnJuX2g
-Vgb4ZjtNgVzQ70kX4VNH04lqmkcnP8iY6dHHexwezls9KwNdouGVDSEFw6K0QOgu
-T4s5nDtNADkNzaMXE11xL7ECgYBoML3rstFmTY1ymB0Uck3jtaP5jR+axdpt7weL
-Zax4qooILhcXL6++DUhMAt5ecTOaPTzci7xKw/Xj3MLzZs8IV5R/WQhf2sj/+gEh
-jy5UijwEaNmEO74dAkWPoMLsvGpocMzO8JeldnXNTXi+0noCgfvtgXnIMAQlnfMh
-z0LviwKBgQCg5KR9JC4iuKses7Kfv2YelcO8vOZkRzBu3NdRWMsiJQC+qfetgd57
-RjRjlRWd1WCHJ5Kmx3hkUaZZOrX5knqfsRW3Nl0I74xgWl7Bli2eSJ9VWl59bcd6
-DqphhY7/gcW+QZlhXpnqbf0W8jB2gPhTYERyCBoS9LfhZWZu/11wuQ==
+MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7
+wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ
+r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ
+Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI
+KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5
+Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu
+La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq
+KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv
+bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f
+Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA
+Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp
+QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo
+DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl
+QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
+Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
+jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
+K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
+HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
+Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
+xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
+28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
+ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
+4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
+I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
-MIICyTCCAjKgAwIBAgIBATANBgkqhkiG9w0BAQUFADBcMQswCQYDVQQGEwJHTzEM
-MAoGA1UECBMDTUdPMQwwCgYDVQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UE
-CxMGU2VydmVyMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMTQwOTI0MTQwMzUzWhcN
-MTUwOTI0MTQwMzUzWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECBMDTUdPMQwwCgYD
-VQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UECxMGQ2xpZW50MRIwEAYDVQQD
-Ewlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDATayX
-xh5NNJ63Cj2TKQnmYJn8W18+2f/Rfic6VMzZBz+5bMCyNJihJXhSS46JaMpBAfYQ
-N5dqW2yE6jsXSgJjmjNV5B76mF73zuIpO8u1/s2hckMqo2pHgpvjKVsKsSTLv5e/
-DrKuurDyqBbL71WyuKVHerwuwRMT1IEy1+ncFRpo1Cml42b6ICHHe5lK/eZ3NIsV
-ID+jrWQDIbSxwsTFgOOj/vHIw1CiWgu82bHgijXrT7KU7PTvv+qpTrbS2a4MtHPV
-B3Er9um2ZdOewEJHGX5TyfNvBVmBYUQ2FemVtw35KM0uF2e9OJTaoWGBmZztm0Ip
-08inHdbqkf1vHuRPAgMBAAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMCMA0GCSqG
-SIb3DQEBBQUAA4GBAJZD7idSIRzhGlJYARPKWnX2CxD4VVB0F5cH5Mlc2YnoUSU/
-rKuPZFuOYND3awKqez6K3rNb3+tQmNitmoOT8ImmX1uJKBo5w9tuo4B2MmLQcPMk
-3fhPePuQCjtlArSmKVrNTrYPkyB9NwKS6q0+FzseFTw9ZJUIKiO9sSjMe+HP
+MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
+BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
+cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
+OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
+DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
+b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
+4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
+616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
+AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
+7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
+Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
+l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
+CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
+DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
+PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
+OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
+/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
+z3A=
-----END CERTIFICATE-----
+
diff --git a/vendor/src/gopkg.in/mgo.v2/testdb/dropall.js b/vendor/src/gopkg.in/mgo.v2/testdb/dropall.js
index 2059349db74..7fa39d112e1 100644
--- a/vendor/src/gopkg.in/mgo.v2/testdb/dropall.js
+++ b/vendor/src/gopkg.in/mgo.v2/testdb/dropall.js
@@ -60,7 +60,7 @@ for (var i in ports) {
}
function notMaster(result) {
- return typeof result.errmsg != "undefined" && result.errmsg.indexOf("not master") >= 0
+ return typeof result.errmsg != "undefined" && (result.errmsg.indexOf("not master") >= 0 || result.errmsg.indexOf("no master found"))
}
// vim:ts=4:sw=4:et
diff --git a/vendor/src/gopkg.in/mgo.v2/testdb/init.js b/vendor/src/gopkg.in/mgo.v2/testdb/init.js
index 8e5d801151d..ceb75a5e4a0 100644
--- a/vendor/src/gopkg.in/mgo.v2/testdb/init.js
+++ b/vendor/src/gopkg.in/mgo.v2/testdb/init.js
@@ -58,14 +58,30 @@ function configAuth() {
addrs.push("127.0.0.1:40003")
}
for (var i in addrs) {
+ print("Configuring auth for", addrs[i])
var db = new Mongo(addrs[i]).getDB("admin")
var v = db.serverBuildInfo().versionArray
+ var timedOut = false
if (v < [2, 5]) {
db.addUser("root", "rapadura")
} else {
- db.createUser({user: "root", pwd: "rapadura", roles: ["root"]})
+ try {
+ db.createUser({user: "root", pwd: "rapadura", roles: ["root"]})
+ } catch (err) {
+ // 3.2 consistently fails replication of creds on 40031 (config server)
+ print("createUser command returned an error: " + err)
+ if (String(err).indexOf("timed out") >= 0) {
+ timedOut = true;
+ }
+ }
+ }
+ for (var i = 0; i < 60; i++) {
+ var ok = db.auth("root", "rapadura")
+ if (ok || !timedOut) {
+ break
+ }
+ sleep(1000);
}
- db.auth("root", "rapadura")
if (v >= [2, 6]) {
db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]})
} else if (v >= [2, 4]) {
@@ -79,14 +95,21 @@ function configAuth() {
function countHealthy(rs) {
var status = rs.runCommand({replSetGetStatus: 1})
var count = 0
+ var primary = 0
if (typeof status.members != "undefined") {
for (var i = 0; i != status.members.length; i++) {
var m = status.members[i]
if (m.health == 1 && (m.state == 1 || m.state == 2)) {
count += 1
+ if (m.state == 1) {
+ primary = 1
+ }
}
}
}
+ if (primary == 0) {
+ count = 0
+ }
return count
}
@@ -96,7 +119,6 @@ for (var i = 0; i != 60; i++) {
var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
print("Replica sets have", count, "healthy nodes.")
if (count == totalRSMembers) {
- sleep(2000)
configShards()
configAuth()
quit(0)
diff --git a/vendor/src/gopkg.in/mgo.v2/testdb/server.pem b/vendor/src/gopkg.in/mgo.v2/testdb/server.pem
index 16fbef16b59..487b92d66b8 100644
--- a/vendor/src/gopkg.in/mgo.v2/testdb/server.pem
+++ b/vendor/src/gopkg.in/mgo.v2/testdb/server.pem
@@ -1,33 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB
+Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk
+mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi
+xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb
+YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R
+ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs
+uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9
+wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu
+MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi
+wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby
+yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk
+eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3
+ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC
+tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB
+xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6
+MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9
+Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3
+IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q
+Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl
+QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z
+GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do
+4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1
+ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7
+1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt
+9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk
+SruEA1+5bfBRMW0P+h7Qfe4=
+-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
-MIIC+DCCAmGgAwIBAgIJAJ5pBAq2HXAsMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV
-BAYTAkdPMQwwCgYDVQQIEwNNR08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdP
-MQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNDA5MjQx
-MzUxMTBaFw0xNTA5MjQxMzUxMTBaMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNN
-R08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIx
-EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
-pQ5wO2L23xMI4PzpVt/Ftvez82IvA9amwr3fUd7RjlYwiFsFeMnG24a4CUoOeKF0
-fpQWc9rmCs0EeP5ofZ2otOsfxoVWXZAZWdgauuwlYB6EeFaAMH3fxVH3IiH+21RR
-q2w9sH/s4fqh5stavUfyPdVmCcb8NW0jD8jlqniJL0kCAwEAAaOBwTCBvjAdBgNV
-HQ4EFgQUjyVWGMHBrmPDGwCY5VusHsKIpzIwgY4GA1UdIwSBhjCBg4AUjyVWGMHB
-rmPDGwCY5VusHsKIpzKhYKReMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNNR08x
-DDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQ
-BgNVBAMTCWxvY2FsaG9zdIIJAJ5pBAq2HXAsMAwGA1UdEwQFMAMBAf8wDQYJKoZI
-hvcNAQEFBQADgYEAa65TgDKp3SRUDNAILSuQOCEbenWh/DMPL4vTVgo/Dxd4emoO
-7i8/4HMTa0XeYIVbAsxO+dqtxqt32IcV7DurmQozdUZ7q0ueJRXon6APnCN0IqPC
-sF71w63xXfpmnvTAfQXi7x6TUAyAQ2nScHExAjzc000DF1dO/6+nIINqNQE=
+MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
+BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP
+MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw
+ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM
+A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl
+cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm
+6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK
+IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5
+GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji
+fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP
+JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd
+OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu
+2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG
+TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw
+nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s
+UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C
+W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL
+yQ==
-----END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-MIICWwIBAAKBgQClDnA7YvbfEwjg/OlW38W297PzYi8D1qbCvd9R3tGOVjCIWwV4
-ycbbhrgJSg54oXR+lBZz2uYKzQR4/mh9nai06x/GhVZdkBlZ2Bq67CVgHoR4VoAw
-fd/FUfciIf7bVFGrbD2wf+zh+qHmy1q9R/I91WYJxvw1bSMPyOWqeIkvSQIDAQAB
-AoGABA9S22MXx2zkbwRJiQWAC3wURQxJM8L33xpkf9MHPIUKNJBolgwAhC3QIQpd
-SMJP5z0lQDxGJEXesksvrsdN+vsgbleRfQsAIcY/rEhr9h8m6auM08f+69oIX32o
-aTOWJJRofjbgzE5c/RijqhIaYGdq54a0EE9mAaODwZoa2/ECQQDRGrIRI5L3pdRA
-yifDKNjvAFOk6TbdGe+J9zHFw4F7bA2In/b+rno9vrj+EanOevD8LRLzeFshzXrG
-WQFzZ69/AkEAyhLSY7WNiQTeJWCwXawVnoSl5AMSRYFA/A2sEUokfORR5BS7gqvL
-mmEKmvslnZp5qlMtM4AyrW2OaoGvE6sFNwJACB3xK5kl61cUli9Cu+CqCx0IIi6r
-YonPMpvV4sdkD1ZycAtFmz1KoXr102b8IHfFQwS855aUcwt26Jwr4j70IQJAXv9+
-PTXq9hF9xiCwiTkPaNh/jLQM8PQU8uoSjIZIpRZJkWpVxNay/z7D15xeULuAmxxD
-UcThDjtFCrkw75Qk/QJAFfcM+5r31R1RrBGM1QPKwDqkFTGsFKnMWuS/pXyLTTOv
-I+In9ZJyA/R5zKeJZjM7xtZs0ANU9HpOpgespq6CvA==
------END RSA PRIVATE KEY-----
diff --git a/vendor/src/gopkg.in/mgo.v2/testdb/setup.sh b/vendor/src/gopkg.in/mgo.v2/testdb/setup.sh
index 317e8e5ab39..a121847e3fb 100755
--- a/vendor/src/gopkg.in/mgo.v2/testdb/setup.sh
+++ b/vendor/src/gopkg.in/mgo.v2/testdb/setup.sh
@@ -15,7 +15,7 @@ start() {
echo "Running supervisord..."
supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 )
echo "Supervisord is up, starting $COUNT processes..."
- for i in $(seq 10); do
+ for i in $(seq 30); do
RUNNING=$(supervisorctl status | grep RUNNING | wc -l | tr -d ' ')
echo "$RUNNING processes running..."
if [ x$COUNT = x$RUNNING ]; then
diff --git a/vendor/src/gopkg.in/mgo.v2/testdb/supervisord.conf b/vendor/src/gopkg.in/mgo.v2/testdb/supervisord.conf
index a4d634ec671..724eaa79ccd 100644
--- a/vendor/src/gopkg.in/mgo.v2/testdb/supervisord.conf
+++ b/vendor/src/gopkg.in/mgo.v2/testdb/supervisord.conf
@@ -20,7 +20,7 @@ command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssiz
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth
[program:db3]
-command = mongod -nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem
+command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem
[program:rs1a]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011
@@ -57,9 +57,12 @@ command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssiz
[program:s1]
command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1
+startretries = 10
[program:s2]
command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1
+startretries = 10
[program:s3]
command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(here)s/keyfile
+startretries = 10
diff --git a/vendor/src/gopkg.in/mgo.v2/testdb/wait.js b/vendor/src/gopkg.in/mgo.v2/testdb/wait.js
index de0d660753b..2735d0e56e5 100644
--- a/vendor/src/gopkg.in/mgo.v2/testdb/wait.js
+++ b/vendor/src/gopkg.in/mgo.v2/testdb/wait.js
@@ -32,20 +32,27 @@ for (var i = 0; i != 60; i++) {
function countHealthy(rs) {
var status = rs.runCommand({replSetGetStatus: 1})
var count = 0
+ var primary = 0
if (typeof status.members != "undefined") {
for (var i = 0; i != status.members.length; i++) {
var m = status.members[i]
if (m.health == 1 && (m.state == 1 || m.state == 2)) {
count += 1
+ if (m.state == 1) {
+ primary = 1
+ }
}
}
}
+ if (primary == 0) {
+ count = 0
+ }
return count
}
var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length
-for (var i = 0; i != 60; i++) {
+for (var i = 0; i != 90; i++) {
var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
print("Replica sets have", count, "healthy nodes.")
if (count == totalRSMembers) {
@@ -56,3 +63,5 @@ for (var i = 0; i != 60; i++) {
print("Replica sets didn't sync up properly.")
quit(12)
+
+// vim:ts=4:sw=4:et